# coding=utf-8 # Copyright 2022 Intel Labs, OpenMMLab and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch DPT (Dense Prediction Transformers) model. This implementation is heavily inspired by OpenMMLab's implementation, found here: https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/models/decode_heads/dpt_head.py. """ import collections.abc import math from dataclasses import dataclass from typing import List, Optional, Set, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...file_utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_outputs import BaseModelOutput, DepthEstimatorOutput, SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, logging, torch_int from ...utils.backbone_utils import load_backbone from .configuration_dpt import DPTConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "DPTConfig" # Base docstring _CHECKPOINT_FOR_DOC = "Intel/dpt-large" _EXPECTED_OUTPUT_SHAPE = [1, 577, 1024] @dataclass class BaseModelOutputWithIntermediateActivations(ModelOutput): """ Base class for model's outputs that also contains intermediate activations that can be used at later stages. Useful in the context of Vision models.: Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. intermediate_activations (`tuple(torch.FloatTensor)`, *optional*): Intermediate activations that can be used to compute hidden states of the model at various layers. """ last_hidden_states: torch.FloatTensor = None intermediate_activations: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class BaseModelOutputWithPoolingAndIntermediateActivations(ModelOutput): """ Base class for model's outputs that also contains a pooling of the last hidden states as well as intermediate activations that can be used by the model at later stages. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining. 
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. intermediate_activations (`tuple(torch.FloatTensor)`, *optional*): Intermediate activations that can be used to compute hidden states of the model at various layers. """ last_hidden_state: torch.FloatTensor = None pooler_output: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None intermediate_activations: Optional[Tuple[torch.FloatTensor, ...]] = None class DPTViTHybridEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config, feature_size=None): super().__init__() image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.backbone = load_backbone(config) feature_dim = self.backbone.channels[-1] if len(self.backbone.channels) != 3: raise ValueError(f"Expected backbone to have 3 output features, got {len(self.backbone.channels)}") self.residual_feature_map_index = [0, 1] # Always take the output of the first and second backbone stage if feature_size is None: feat_map_shape = config.backbone_featmap_shape feature_size = feat_map_shape[-2:] feature_dim = feat_map_shape[1] else: feature_size = ( feature_size if isinstance(feature_size, collections.abc.Iterable) else (feature_size, feature_size) ) feature_dim = self.backbone.channels[-1] self.image_size = image_size self.patch_size = patch_size[0] self.num_channels = num_channels self.projection = nn.Conv2d(feature_dim, hidden_size, kernel_size=1) self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size)) def _resize_pos_embed(self, posemb, grid_size_height, grid_size_width, start_index=1): posemb_tok = posemb[:, :start_index] posemb_grid = posemb[0, start_index:] old_grid_size = int(math.sqrt(len(posemb_grid))) posemb_grid = posemb_grid.reshape(1, old_grid_size, old_grid_size, -1).permute(0, 3, 1, 2) posemb_grid = nn.functional.interpolate(posemb_grid, size=(grid_size_height, grid_size_width), mode="bilinear") posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, grid_size_height * grid_size_width, -1) posemb = torch.cat([posemb_tok, posemb_grid], dim=1) return 
posemb def forward( self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False, return_dict: bool = False ) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) if not interpolate_pos_encoding: if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model" f" ({self.image_size[0]}*{self.image_size[1]})." ) position_embeddings = self._resize_pos_embed( self.position_embeddings, height // self.patch_size, width // self.patch_size ) backbone_output = self.backbone(pixel_values) features = backbone_output.feature_maps[-1] # Retrieve also the intermediate activations to use them at later stages output_hidden_states = [backbone_output.feature_maps[index] for index in self.residual_feature_map_index] embeddings = self.projection(features).flatten(2).transpose(1, 2) cls_tokens = self.cls_token.expand(batch_size, -1, -1) embeddings = torch.cat((cls_tokens, embeddings), dim=1) # add positional encoding to each token embeddings = embeddings + position_embeddings if not return_dict: return (embeddings, output_hidden_states) # Return hidden states and intermediate activations return BaseModelOutputWithIntermediateActivations( last_hidden_states=embeddings, intermediate_activations=output_hidden_states, ) class DPTViTEmbeddings(nn.Module): """ Construct the CLS token, position and patch embeddings. """ def __init__(self, config): super().__init__() self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.patch_embeddings = DPTViTPatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size)) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.config = config def _resize_pos_embed(self, posemb, grid_size_height, grid_size_width, start_index=1): posemb_tok = posemb[:, :start_index] posemb_grid = posemb[0, start_index:] old_grid_size = torch_int(posemb_grid.size(0) ** 0.5) posemb_grid = posemb_grid.reshape(1, old_grid_size, old_grid_size, -1).permute(0, 3, 1, 2) posemb_grid = nn.functional.interpolate(posemb_grid, size=(grid_size_height, grid_size_width), mode="bilinear") posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, grid_size_height * grid_size_width, -1) posemb = torch.cat([posemb_tok, posemb_grid], dim=1) return posemb def forward(self, pixel_values, return_dict=False): batch_size, num_channels, height, width = pixel_values.shape # possibly interpolate position encodings to handle varying image sizes patch_size = self.config.patch_size position_embeddings = self._resize_pos_embed( self.position_embeddings, height // patch_size, width // patch_size ) embeddings = self.patch_embeddings(pixel_values) batch_size, seq_len, _ = embeddings.size() # add the [CLS] token to the embedded patch tokens cls_tokens = self.cls_token.expand(batch_size, -1, -1) embeddings = torch.cat((cls_tokens, embeddings), dim=1) # add positional encoding to each token embeddings = embeddings + position_embeddings embeddings = self.dropout(embeddings) if not return_dict: return (embeddings,) return BaseModelOutputWithIntermediateActivations(last_hidden_states=embeddings) class DPTViTPatchEmbeddings(nn.Module): """ Image to Patch Embedding. 
""" def __init__(self, config): super().__init__() image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values): batch_size, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2) return embeddings # Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->DPT class DPTViTSelfAttention(nn.Module): def __init__(self, config: DPTConfig) -> None: super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size,} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->DPT class DPTViTSelfOutput(nn.Module): """ The residual connection is defined in DPTLayer instead of here (as is the case with other models), due to the layernorm applied before each block. """ def __init__(self, config: DPTConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class DPTViTAttention(nn.Module): def __init__(self, config: DPTConfig) -> None: super().__init__() self.attention = DPTViTSelfAttention(config) self.output = DPTViTSelfOutput(config) self.pruned_heads = set() # Copied from transformers.models.vit.modeling_vit.ViTAttention.prune_heads def prune_heads(self, heads: Set[int]) -> None: if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads ) # Prune linear layers self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) # Copied from transformers.models.vit.modeling_vit.ViTAttention.forward def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_outputs = self.attention(hidden_states, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.vit.modeling_vit.ViTIntermediate with ViT->DPT class DPTViTIntermediate(nn.Module): def __init__(self, config: DPTConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTOutput with ViT->DPT class DPTViTOutput(nn.Module): def __init__(self, config: DPTConfig) -> None: super().__init__() self.dense = nn.Linear(config.intermediate_size, 
config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states # copied from transformers.models.vit.modeling_vit.ViTLayer with ViTConfig->DPTConfig, ViTAttention->DPTViTAttention, ViTIntermediate->DPTViTIntermediate, ViTOutput->DPTViTOutput class DPTViTLayer(nn.Module): """This corresponds to the Block class in the timm implementation.""" def __init__(self, config: DPTConfig) -> None: super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = DPTViTAttention(config) self.intermediate = DPTViTIntermediate(config) self.output = DPTViTOutput(config) self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_attention_outputs = self.attention( self.layernorm_before(hidden_states), # in ViT, layernorm is applied before self-attention head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # first residual connection hidden_states = attention_output + hidden_states # in ViT, layernorm is also applied after self-attention layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) # second residual connection is done here layer_output = self.output(layer_output, hidden_states) outputs = (layer_output,) + outputs return outputs # copied from transformers.models.vit.modeling_vit.ViTEncoder with ViTConfig -> DPTConfig, ViTLayer->DPTViTLayer class DPTViTEncoder(nn.Module): def __init__(self, config: DPTConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList([DPTViTLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, layer_head_mask, output_attentions, ) else: layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class DPTReassembleStage(nn.Module): 
""" This class reassembles the hidden states of the backbone into image-like feature representations at various resolutions. This happens in 3 stages: 1. Map the N + 1 tokens to a set of N tokens, by taking into account the readout ([CLS]) token according to `config.readout_type`. 2. Project the channel dimension of the hidden states according to `config.neck_hidden_sizes`. 3. Resizing the spatial dimensions (height, width). Args: config (`[DPTConfig]`): Model configuration class defining the model architecture. """ def __init__(self, config): super().__init__() self.config = config self.layers = nn.ModuleList() if config.is_hybrid: self._init_reassemble_dpt_hybrid(config) else: self._init_reassemble_dpt(config) self.neck_ignore_stages = config.neck_ignore_stages def _init_reassemble_dpt_hybrid(self, config): r""" " For DPT-Hybrid the first 2 reassemble layers are set to `nn.Identity()`, please check the official implementation: https://github.com/isl-org/DPT/blob/f43ef9e08d70a752195028a51be5e1aff227b913/dpt/vit.py#L438 for more details. """ for i, factor in zip(range(len(config.neck_hidden_sizes)), config.reassemble_factors): if i <= 1: self.layers.append(nn.Identity()) elif i > 1: self.layers.append(DPTReassembleLayer(config, channels=config.neck_hidden_sizes[i], factor=factor)) if config.readout_type != "project": raise ValueError(f"Readout type {config.readout_type} is not supported for DPT-Hybrid.") # When using DPT-Hybrid the readout type is set to "project". The sanity check is done on the config file self.readout_projects = nn.ModuleList() hidden_size = _get_backbone_hidden_size(config) for i in range(len(config.neck_hidden_sizes)): if i <= 1: self.readout_projects.append(nn.Sequential(nn.Identity())) elif i > 1: self.readout_projects.append( nn.Sequential(nn.Linear(2 * hidden_size, hidden_size), ACT2FN[config.hidden_act]) ) def _init_reassemble_dpt(self, config): for i, factor in zip(range(len(config.neck_hidden_sizes)), config.reassemble_factors): self.layers.append(DPTReassembleLayer(config, channels=config.neck_hidden_sizes[i], factor=factor)) if config.readout_type == "project": self.readout_projects = nn.ModuleList() hidden_size = _get_backbone_hidden_size(config) for _ in range(len(config.neck_hidden_sizes)): self.readout_projects.append( nn.Sequential(nn.Linear(2 * hidden_size, hidden_size), ACT2FN[config.hidden_act]) ) def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]: """ Args: hidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length + 1, hidden_size)`): List of hidden states from the backbone. 
""" out = [] for i, hidden_state in enumerate(hidden_states): if i not in self.neck_ignore_stages: # reshape to (batch_size, num_channels, height, width) cls_token, hidden_state = hidden_state[:, 0], hidden_state[:, 1:] batch_size, sequence_length, num_channels = hidden_state.shape if patch_height is not None and patch_width is not None: hidden_state = hidden_state.reshape(batch_size, patch_height, patch_width, num_channels) else: size = int(math.sqrt(sequence_length)) hidden_state = hidden_state.reshape(batch_size, size, size, num_channels) hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous() feature_shape = hidden_state.shape if self.config.readout_type == "project": # reshape to (batch_size, height*width, num_channels) hidden_state = hidden_state.flatten(2).permute((0, 2, 1)) readout = cls_token.unsqueeze(1).expand_as(hidden_state) # concatenate the readout token to the hidden states and project hidden_state = self.readout_projects[i](torch.cat((hidden_state, readout), -1)) # reshape back to (batch_size, num_channels, height, width) hidden_state = hidden_state.permute(0, 2, 1).reshape(feature_shape) elif self.config.readout_type == "add": hidden_state = hidden_state.flatten(2) + cls_token.unsqueeze(-1) hidden_state = hidden_state.reshape(feature_shape) hidden_state = self.layers[i](hidden_state) out.append(hidden_state) return out def _get_backbone_hidden_size(config): if config.backbone_config is not None and config.is_hybrid is False: return config.backbone_config.hidden_size else: return config.hidden_size class DPTReassembleLayer(nn.Module): def __init__(self, config, channels, factor): super().__init__() # projection hidden_size = _get_backbone_hidden_size(config) self.projection = nn.Conv2d(in_channels=hidden_size, out_channels=channels, kernel_size=1) # up/down sampling depending on factor if factor > 1: self.resize = nn.ConvTranspose2d(channels, channels, kernel_size=factor, stride=factor, padding=0) elif factor == 1: self.resize = nn.Identity() elif factor < 1: # so should downsample self.resize = nn.Conv2d(channels, channels, kernel_size=3, stride=int(1 / factor), padding=1) def forward(self, hidden_state): hidden_state = self.projection(hidden_state) hidden_state = self.resize(hidden_state) return hidden_state class DPTFeatureFusionStage(nn.Module): def __init__(self, config): super().__init__() self.layers = nn.ModuleList() for _ in range(len(config.neck_hidden_sizes)): self.layers.append(DPTFeatureFusionLayer(config)) def forward(self, hidden_states): # reversing the hidden_states, we start from the last hidden_states = hidden_states[::-1] fused_hidden_states = [] # first layer only uses the last hidden_state fused_hidden_state = self.layers[0](hidden_states[0]) fused_hidden_states.append(fused_hidden_state) # looping from the last layer to the second for hidden_state, layer in zip(hidden_states[1:], self.layers[1:]): fused_hidden_state = layer(fused_hidden_state, hidden_state) fused_hidden_states.append(fused_hidden_state) return fused_hidden_states class DPTPreActResidualLayer(nn.Module): """ ResidualConvUnit, pre-activate residual unit. Args: config (`[DPTConfig]`): Model configuration class defining the model architecture. 
""" def __init__(self, config): super().__init__() self.use_batch_norm = config.use_batch_norm_in_fusion_residual use_bias_in_fusion_residual = ( config.use_bias_in_fusion_residual if config.use_bias_in_fusion_residual is not None else not self.use_batch_norm ) self.activation1 = nn.ReLU() self.convolution1 = nn.Conv2d( config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=use_bias_in_fusion_residual, ) self.activation2 = nn.ReLU() self.convolution2 = nn.Conv2d( config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=use_bias_in_fusion_residual, ) if self.use_batch_norm: self.batch_norm1 = nn.BatchNorm2d(config.fusion_hidden_size) self.batch_norm2 = nn.BatchNorm2d(config.fusion_hidden_size) def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: residual = hidden_state hidden_state = self.activation1(hidden_state) hidden_state = self.convolution1(hidden_state) if self.use_batch_norm: hidden_state = self.batch_norm1(hidden_state) hidden_state = self.activation2(hidden_state) hidden_state = self.convolution2(hidden_state) if self.use_batch_norm: hidden_state = self.batch_norm2(hidden_state) return hidden_state + residual class DPTFeatureFusionLayer(nn.Module): """Feature fusion layer, merges feature maps from different stages. Args: config (`[DPTConfig]`): Model configuration class defining the model architecture. align_corners (`bool`, *optional*, defaults to `True`): The align_corner setting for bilinear upsample. """ def __init__(self, config, align_corners=True): super().__init__() self.align_corners = align_corners self.projection = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=1, bias=True) self.residual_layer1 = DPTPreActResidualLayer(config) self.residual_layer2 = DPTPreActResidualLayer(config) def forward(self, hidden_state, residual=None): if residual is not None: if hidden_state.shape != residual.shape: residual = nn.functional.interpolate( residual, size=(hidden_state.shape[2], hidden_state.shape[3]), mode="bilinear", align_corners=False ) hidden_state = hidden_state + self.residual_layer1(residual) hidden_state = self.residual_layer2(hidden_state) hidden_state = nn.functional.interpolate( hidden_state, scale_factor=2, mode="bilinear", align_corners=self.align_corners ) hidden_state = self.projection(hidden_state) return hidden_state class DPTPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = DPTConfig base_model_prefix = "dpt" main_input_name = "pixel_values" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) DPT_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ViTConfig`]): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ DPT_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`DPTImageProcessor.__call__`] for details. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare DPT Model transformer outputting raw hidden-states without any specific head on top.", DPT_START_DOCSTRING, ) class DPTModel(DPTPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config # vit encoder if config.is_hybrid: self.embeddings = DPTViTHybridEmbeddings(config) else: self.embeddings = DPTViTEmbeddings(config) self.encoder = DPTViTEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = DPTViTPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): if self.config.is_hybrid: return self.embeddings else: return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(DPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndIntermediateActivations, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: torch.FloatTensor, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPoolingAndIntermediateActivations]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(pixel_values, return_dict=return_dict) embedding_last_hidden_states = embedding_output[0] if not return_dict else embedding_output.last_hidden_states encoder_outputs = self.encoder( embedding_last_hidden_states, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,) return head_outputs + encoder_outputs[1:] + embedding_output[1:] return BaseModelOutputWithPoolingAndIntermediateActivations( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, intermediate_activations=embedding_output.intermediate_activations, ) # Copied from transformers.models.vit.modeling_vit.ViTPooler with ViT->DPT class DPTViTPooler(nn.Module): def __init__(self, config: DPTConfig): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class DPTNeck(nn.Module): """ DPTNeck. A neck is a module that is normally used between the backbone and the head. It takes a list of tensors as input and produces another list of tensors as output. For DPT, it includes 2 stages: * DPTReassembleStage * DPTFeatureFusionStage. Args: config (dict): config dict. """ def __init__(self, config): super().__init__() self.config = config # postprocessing: only required in case of a non-hierarchical backbone (e.g. 
ViT, BEiT) if config.backbone_config is not None and config.backbone_config.model_type in ["swinv2"]: self.reassemble_stage = None else: self.reassemble_stage = DPTReassembleStage(config) self.convs = nn.ModuleList() for channel in config.neck_hidden_sizes: self.convs.append(nn.Conv2d(channel, config.fusion_hidden_size, kernel_size=3, padding=1, bias=False)) # fusion self.fusion_stage = DPTFeatureFusionStage(config) def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]: """ Args: hidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`): List of hidden states from the backbone. """ if not isinstance(hidden_states, (tuple, list)): raise TypeError("hidden_states should be a tuple or list of tensors") if len(hidden_states) != len(self.config.neck_hidden_sizes): raise ValueError("The number of hidden states should be equal to the number of neck hidden sizes.") # postprocess hidden states if self.reassemble_stage is not None: hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width) features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)] # fusion blocks output = self.fusion_stage(features) return output class DPTDepthEstimationHead(nn.Module): """ Output head consisting of 3 convolutional layers. It progressively halves the feature dimension and upsamples the predictions to the input resolution after the first convolutional layer (details can be found in the paper's supplementary material). """ def __init__(self, config): super().__init__() self.config = config self.projection = None if config.add_projection: self.projection = nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) features = config.fusion_hidden_size self.head = nn.Sequential( nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1), nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True), nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1), nn.ReLU(), nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), nn.ReLU(), ) def forward(self, hidden_states: List[torch.Tensor]) -> torch.Tensor: # use last features hidden_states = hidden_states[self.config.head_in_index] if self.projection is not None: hidden_states = self.projection(hidden_states) hidden_states = nn.ReLU()(hidden_states) predicted_depth = self.head(hidden_states) predicted_depth = predicted_depth.squeeze(dim=1) return predicted_depth @add_start_docstrings( """ DPT Model with a depth estimation head on top (consisting of 3 convolutional layers) e.g. for KITTI, NYUv2. 
""", DPT_START_DOCSTRING, ) class DPTForDepthEstimation(DPTPreTrainedModel): def __init__(self, config): super().__init__(config) self.backbone = None if config.is_hybrid is False and (config.backbone_config is not None or config.backbone is not None): self.backbone = load_backbone(config) else: self.dpt = DPTModel(config, add_pooling_layer=False) # Neck self.neck = DPTNeck(config) # Depth estimation head self.head = DPTDepthEstimationHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(DPT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DepthEstimatorOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.FloatTensor, head_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], DepthEstimatorOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Ground truth depth estimation maps for computing the loss. Returns: Examples: ```python >>> from transformers import AutoImageProcessor, DPTForDepthEstimation >>> import torch >>> import numpy as np >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("Intel/dpt-large") >>> model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large") >>> # prepare image for the model >>> inputs = image_processor(images=image, return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) ... predicted_depth = outputs.predicted_depth >>> # interpolate to original size >>> prediction = torch.nn.functional.interpolate( ... predicted_depth.unsqueeze(1), ... size=image.size[::-1], ... mode="bicubic", ... align_corners=False, ... 
) >>> # visualize the prediction >>> output = prediction.squeeze().cpu().numpy() >>> formatted = (output * 255 / np.max(output)).astype("uint8") >>> depth = Image.fromarray(formatted) ```""" loss = None if labels is not None: raise NotImplementedError("Training is not implemented yet") return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions if self.backbone is not None: outputs = self.backbone.forward_with_filtered_kwargs( pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions ) hidden_states = outputs.feature_maps else: outputs = self.dpt( pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=True, # we need the intermediate hidden states return_dict=return_dict, ) hidden_states = outputs.hidden_states if return_dict else outputs[1] # only keep certain features based on config.backbone_out_indices # note that the hidden_states also include the initial embeddings if not self.config.is_hybrid: hidden_states = [ feature for idx, feature in enumerate(hidden_states[1:]) if idx in self.config.backbone_out_indices ] else: backbone_hidden_states = outputs.intermediate_activations if return_dict else list(outputs[-1]) backbone_hidden_states.extend( feature for idx, feature in enumerate(hidden_states[1:]) if idx in self.config.backbone_out_indices[2:] ) hidden_states = backbone_hidden_states patch_height, patch_width = None, None if self.config.backbone_config is not None and self.config.is_hybrid is False: _, _, height, width = pixel_values.shape patch_size = self.config.backbone_config.patch_size patch_height = height // patch_size patch_width = width // patch_size hidden_states = self.neck(hidden_states, patch_height, patch_width) predicted_depth = self.head(hidden_states) if not return_dict: if output_hidden_states: output = (predicted_depth,) + outputs[1:] else: output = (predicted_depth,) + outputs[2:] return ((loss,) + output) if loss is not None else output return DepthEstimatorOutput( loss=loss, predicted_depth=predicted_depth, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) class DPTSemanticSegmentationHead(nn.Module): def __init__(self, config): super().__init__() self.config = config features = config.fusion_hidden_size self.head = nn.Sequential( nn.Conv2d(features, features, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(features), nn.ReLU(), nn.Dropout(config.semantic_classifier_dropout), nn.Conv2d(features, config.num_labels, kernel_size=1), nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True), ) def forward(self, hidden_states: List[torch.Tensor]) -> torch.Tensor: # use last features hidden_states = hidden_states[self.config.head_in_index] logits = self.head(hidden_states) return logits class DPTAuxiliaryHead(nn.Module): def __init__(self, config): super().__init__() features = config.fusion_hidden_size self.head = nn.Sequential( nn.Conv2d(features, features, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(features), nn.ReLU(), nn.Dropout(0.1, False), nn.Conv2d(features, config.num_labels, kernel_size=1), ) def forward(self, hidden_states): logits = self.head(hidden_states) return logits @add_start_docstrings( """ DPT Model with a semantic segmentation head on top e.g. 
for ADE20k, CityScapes. """, DPT_START_DOCSTRING, ) class DPTForSemanticSegmentation(DPTPreTrainedModel): def __init__(self, config): super().__init__(config) self.dpt = DPTModel(config, add_pooling_layer=False) # Neck self.neck = DPTNeck(config) # Segmentation head(s) self.head = DPTSemanticSegmentationHead(config) self.auxiliary_head = DPTAuxiliaryHead(config) if config.use_auxiliary_head else None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(DPT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], SemanticSegmenterOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy). Returns: Examples: ```python >>> from transformers import AutoImageProcessor, DPTForSemanticSegmentation >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("Intel/dpt-large-ade") >>> model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) if labels is not None and self.config.num_labels == 1: raise ValueError("The number of labels should be greater than one") outputs = self.dpt( pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=True, # we need the intermediate hidden states return_dict=return_dict, ) hidden_states = outputs.hidden_states if return_dict else outputs[1] # only keep certain features based on config.backbone_out_indices # note that the hidden_states also include the initial embeddings if not self.config.is_hybrid: hidden_states = [ feature for idx, feature in enumerate(hidden_states[1:]) if idx in self.config.backbone_out_indices ] else: backbone_hidden_states = outputs.intermediate_activations if return_dict else list(outputs[-1]) backbone_hidden_states.extend( feature for idx, feature in enumerate(hidden_states[1:]) if idx in self.config.backbone_out_indices[2:] ) hidden_states = backbone_hidden_states hidden_states = self.neck(hidden_states=hidden_states) logits = self.head(hidden_states) auxiliary_logits = None if self.auxiliary_head is not None: auxiliary_logits = self.auxiliary_head(hidden_states[-1]) loss = None if labels is not None: # upsample logits to the images' original size upsampled_logits = nn.functional.interpolate( logits, size=labels.shape[-2:], mode="bilinear", align_corners=False ) if auxiliary_logits is not None: upsampled_auxiliary_logits = nn.functional.interpolate( auxiliary_logits, size=labels.shape[-2:], mode="bilinear", align_corners=False ) # compute weighted 
loss loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index) main_loss = loss_fct(upsampled_logits, labels) auxiliary_loss = loss_fct(upsampled_auxiliary_logits, labels) loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: output = (logits,) + outputs[1:] else: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, )
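A note on the `_resize_pos_embed` helpers defined twice above: they are what let DPT consume images at resolutions other than the pretraining size. The learned position table is split into the [CLS] slot and the patch grid, the grid is reshaped to 2-D, bilinearly interpolated to the new patch layout, and flattened back. Below is a minimal self-contained sketch of the same idea in plain PyTorch; the sizes are hypothetical and the function is independent of the classes above.

```python
import math

import torch
import torch.nn.functional as F


def resize_pos_embed(posemb: torch.Tensor, new_h: int, new_w: int, start_index: int = 1) -> torch.Tensor:
    """Interpolate a (1, 1 + old_h * old_w, dim) position table to a new patch grid."""
    tok = posemb[:, :start_index]  # the [CLS] slot has no spatial position, so it is kept as-is
    grid = posemb[0, start_index:]
    old = int(math.sqrt(grid.shape[0]))  # assumes a square pretraining grid, as the DPT code does
    grid = grid.reshape(1, old, old, -1).permute(0, 3, 1, 2)  # (1, dim, old, old)
    grid = F.interpolate(grid, size=(new_h, new_w), mode="bilinear", align_corners=False)
    grid = grid.permute(0, 2, 3, 1).reshape(1, new_h * new_w, -1)
    return torch.cat([tok, grid], dim=1)


# e.g. a ViT-L/16 table pretrained at 384x384 (24x24 patches), resized for 512x512 input
posemb = torch.randn(1, 1 + 24 * 24, 1024)
print(resize_pos_embed(posemb, 32, 32).shape)  # torch.Size([1, 1025, 1024])
```

Keeping the [CLS] embedding out of the interpolation matters: it carries no spatial information, so stretching it along with the grid would corrupt it.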
# Source: transformers/src/transformers/models/dpt/modeling_dpt.py (repo: transformers)
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert EnCodec checkpoints.""" import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() logger = logging.get_logger("transformers.models.encodec") MAPPING_QUANTIZER = { "quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited", "quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size", "quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed", "quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg", } MAPPING_ENCODER = { "encoder.model.0.conv.conv": "encoder.layers.0.conv", "encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv", "encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv", "encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv", "encoder.model.3.conv.conv": "encoder.layers.3.conv", "encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv", "encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv", "encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv", "encoder.model.6.conv.conv": "encoder.layers.6.conv", "encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv", "encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv", "encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv", "encoder.model.9.conv.conv": "encoder.layers.9.conv", "encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv", "encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv", "encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv", "encoder.model.12.conv.conv": "encoder.layers.12.conv", "encoder.model.13.lstm": "encoder.layers.13.lstm", "encoder.model.15.conv.conv": "encoder.layers.15.conv", } MAPPING_ENCODER_48K = { "encoder.model.0.conv.norm": "encoder.layers.0.norm", "encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm", "encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm", "encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm", "encoder.model.3.conv.norm": "encoder.layers.3.norm", "encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm", "encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm", "encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm", "encoder.model.6.conv.norm": "encoder.layers.6.norm", "encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm", "encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm", "encoder.model.7.shortcut.conv.norm": 
"encoder.layers.7.shortcut.norm", "encoder.model.9.conv.norm": "encoder.layers.9.norm", "encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm", "encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm", "encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm", "encoder.model.12.conv.norm": "encoder.layers.12.norm", "encoder.model.15.conv.norm": "encoder.layers.15.norm", } MAPPING_DECODER = { "decoder.model.0.conv.conv": "decoder.layers.0.conv", "decoder.model.1.lstm": "decoder.layers.1.lstm", "decoder.model.3.convtr.convtr": "decoder.layers.3.conv", "decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv", "decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv", "decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv", "decoder.model.6.convtr.convtr": "decoder.layers.6.conv", "decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv", "decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv", "decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv", "decoder.model.9.convtr.convtr": "decoder.layers.9.conv", "decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv", "decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv", "decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv", "decoder.model.12.convtr.convtr": "decoder.layers.12.conv", "decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv", "decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv", "decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv", "decoder.model.15.conv.conv": "decoder.layers.15.conv", } MAPPING_DECODER_48K = { "decoder.model.0.conv.norm": "decoder.layers.0.norm", "decoder.model.3.convtr.norm": "decoder.layers.3.norm", "decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm", "decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm", "decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm", "decoder.model.6.convtr.norm": "decoder.layers.6.norm", "decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm", "decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm", "decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm", "decoder.model.9.convtr.norm": "decoder.layers.9.norm", "decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm", "decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm", "decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm", "decoder.model.12.convtr.norm": "decoder.layers.12.norm", "decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm", "decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm", "decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm", "decoder.model.15.conv.norm": "decoder.layers.15.norm", } MAPPING_24K = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } MAPPING_48K = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } TOP_LEVEL_KEYS = [] IGNORE_KEYS = [] def set_recursively(hf_pointer, key, value, full_name, weight_type): for attribute in key.split("."): hf_pointer = getattr(hf_pointer, attribute) if weight_type is not None: hf_shape = getattr(hf_pointer, weight_type).shape else: hf_shape = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": hf_pointer.weight.data = value elif weight_type == "weight_g": hf_pointer.weight_g.data = value elif weight_type == "weight_v": hf_pointer.weight_v.data = value elif weight_type == "bias": hf_pointer.bias.data = value elif weight_type == "running_mean": hf_pointer.running_mean.data = value elif weight_type == "running_var": hf_pointer.running_var.data = value elif weight_type == "num_batches_tracked": hf_pointer.num_batches_tracked.data = value elif weight_type == "weight_ih_l0": hf_pointer.weight_ih_l0.data = value elif weight_type == "weight_hh_l0": hf_pointer.weight_hh_l0.data = value elif weight_type == "bias_ih_l0": hf_pointer.bias_ih_l0.data = value elif weight_type == "bias_hh_l0": hf_pointer.bias_hh_l0.data = value elif weight_type == "weight_ih_l1": hf_pointer.weight_ih_l1.data = value elif weight_type == "weight_hh_l1": hf_pointer.weight_hh_l1.data = value elif weight_type == "bias_ih_l1": hf_pointer.bias_ih_l1.data = value elif weight_type == "bias_hh_l1": hf_pointer.bias_hh_l1.data = value else: hf_pointer.data = value logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.") def should_ignore(name, ignore_keys): for key in ignore_keys: if key.endswith(".*"): if name.startswith(key[:-1]): return True elif ".*." in key: prefix, suffix = key.split(".*.") if prefix in name and suffix in name: return True elif key in name: return True return False def recursively_load_weights(orig_dict, hf_model, model_name): unused_weights = [] if model_name in ["encodec_24khz", "encodec_32khz"]: MAPPING = MAPPING_24K elif model_name == "encodec_48khz": MAPPING = MAPPING_48K else: raise ValueError(f"Unsupported model: {model_name}") for name, value in orig_dict.items(): if should_ignore(name, IGNORE_KEYS): logger.info(f"{name} was ignored") continue is_used = False for key, mapped_key in MAPPING.items(): if "*" in key: prefix, suffix = key.split(".*.") if prefix in name and suffix in name: key = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith("embed") and name.endswith("embed_avg"): continue is_used = True if "*" in mapped_key: layer_index = name.split(key)[0].split(".")[-2] mapped_key = mapped_key.replace("*", layer_index) if "weight_g" in name: weight_type = "weight_g" elif "weight_v" in name: weight_type = "weight_v" elif "weight_ih_l0" in name: weight_type = "weight_ih_l0" elif "weight_hh_l0" in name: weight_type = "weight_hh_l0" elif "bias_ih_l0" in name: weight_type = "bias_ih_l0" elif "bias_hh_l0" in name: weight_type = "bias_hh_l0" elif "weight_ih_l1" in name: weight_type = "weight_ih_l1" elif "weight_hh_l1" in name: weight_type = "weight_hh_l1" elif "bias_ih_l1" in name: weight_type = "bias_ih_l1" elif "bias_hh_l1" in name: weight_type = "bias_hh_l1" elif "bias" in name: weight_type = "bias" elif "weight" in name: weight_type = "weight" elif "running_mean" in name: weight_type = "running_mean" elif "running_var" in name: weight_type = "running_var" elif "num_batches_tracked" in name: weight_type = "num_batches_tracked" else: weight_type = None set_recursively(hf_model, mapped_key, value, name, weight_type) continue if not is_used: unused_weights.append(name) logger.warning(f"Unused weights: {unused_weights}") @torch.no_grad() def convert_checkpoint( model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ): """ 
Copy/paste/tweak model's weights to transformers design. """ if config_path is not None: config = EncodecConfig.from_pretrained(config_path) else: config = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": config.upsampling_ratios = [8, 5, 4, 4] config.target_bandwidths = [2.2] config.num_filters = 64 config.sampling_rate = 32_000 config.codebook_size = 2048 config.use_causal_conv = False config.normalize = False config.use_conv_shortcut = False elif model_name == "encodec_48khz": config.upsampling_ratios = [8, 5, 4, 2] config.target_bandwidths = [3.0, 6.0, 12.0, 24.0] config.sampling_rate = 48_000 config.audio_channels = 2 config.use_causal_conv = False config.norm_type = "time_group_norm" config.normalize = True config.chunk_length_s = 1.0 config.overlap = 0.01 else: raise ValueError(f"Unknown model name: {model_name}") model = EncodecModel(config) feature_extractor = EncodecFeatureExtractor( feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, ) feature_extractor.save_pretrained(pytorch_dump_folder_path) original_checkpoint = torch.load(checkpoint_path) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights original_checkpoint = original_checkpoint["best_state"] recursively_load_weights(original_checkpoint, model, model_name) model.save_pretrained(pytorch_dump_folder_path) if repo_id: print("Pushing to the hub...") feature_extractor.push_to_hub(repo_id) model.push_to_hub(repo_id) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--model", default="encodec_24khz", type=str, help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) args = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
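# --- Editor's note: the sketch below is illustrative and not part of the original
# script. It shows one way to smoke-test a converted checkpoint by round-tripping
# audio through the saved model; `raw_audio` (a 1-D float waveform at the model's
# sampling rate) is assumed to be supplied by the caller.
def _sanity_check_conversion(pytorch_dump_folder_path, raw_audio):
    from transformers import EncodecFeatureExtractor, EncodecModel

    model = EncodecModel.from_pretrained(pytorch_dump_folder_path)
    feature_extractor = EncodecFeatureExtractor.from_pretrained(pytorch_dump_folder_path)
    inputs = feature_extractor(
        raw_audio=raw_audio, sampling_rate=feature_extractor.sampling_rate, return_tensors="pt"
    )
    with torch.no_grad():
        outputs = model(inputs["input_values"], inputs["padding_mask"])
    # The reconstruction should line up with the (padded) input length, up to codec striding.
    print("input:", inputs["input_values"].shape, "reconstruction:", outputs.audio_values.shape)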
transformers/src/transformers/models/encodec/convert_encodec_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/encodec/convert_encodec_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 7119 }
362
# coding=utf-8
# Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF 2.0 ESM model."""

from __future__ import annotations

import os
from typing import Optional, Tuple, Union

import numpy as np
import tensorflow as tf

from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithPastAndCrossAttentions,
    TFBaseModelOutputWithPoolingAndCrossAttentions,
    TFMaskedLMOutput,
    TFSequenceClassifierOutput,
    TFTokenClassifierOutput,
)
from ...modeling_tf_utils import (
    TFMaskedLanguageModelingLoss,
    TFModelInputType,
    TFPreTrainedModel,
    TFSequenceClassificationLoss,
    TFTokenClassificationLoss,
    get_initializer,
    keras,
    shape_list,
    unpack_inputs,
)
from ...tf_utils import check_embeddings_within_bounds, stable_softmax
from ...utils import logging
from .configuration_esm import EsmConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "facebook/esm2_t6_8M_UR50D"
_CONFIG_FOR_DOC = "EsmConfig"


def rotate_half(x):
    x1, x2 = tf.split(x, 2, axis=-1)
    return tf.concat((-x2, x1), axis=-1)


def apply_rotary_pos_emb(x, cos, sin):
    cos = cos[:, :, : tf.shape(x)[-2], :]
    sin = sin[:, :, : tf.shape(x)[-2], :]
    return (x * cos) + (rotate_half(x) * sin)


def symmetrize(x):
    "Make layer symmetric in final two dimensions, used for contact prediction."
    return x + tf.linalg.matrix_transpose(x)  # Transposes last two dimensions only


def average_product_correct(x):
    "Perform average product correct, used for contact prediction."
    a1 = tf.reduce_sum(x, -1, keepdims=True)
    a2 = tf.reduce_sum(x, -2, keepdims=True)
    a12 = tf.reduce_sum(x, (-1, -2), keepdims=True)

    avg = a1 * a2
    avg = avg / a12
    normalized = x - avg
    return normalized


class TFRotaryEmbedding(keras.layers.Layer):
    """
    Rotary position embeddings based on those in
    [RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer). Query and keys are transformed by
    rotation matrices which depend on their relative positions.
    """

    def __init__(self, dim: int, name=None):
        super().__init__(name=name)
        # Matt: The PyTorch version of this layer does a lot of work to cache values, but we just rely on TF compilation
        # and/or XLA to sort out constants like that. It actually may not seem like this layer needs to be stateful at
        # all when we benefit from TF compilation, but it does. The reason is that self.inv_freq is a buffer in the
        # original implementation, but all the shared ESM checkpoints were trained with fp16 params. This means that
        # the inv_freq tensor was stored as a float16, and we need to replicate those lower-precision values or our
        # models give different outputs from the original.
self.dim = dim def build(self, input_shape): super().build(input_shape) self.inv_freq = self.add_weight( "inv_freq", shape=(self.dim // 2,), dtype=tf.float32, initializer=get_initializer(1.0), trainable=False ) self.inv_freq.assign( 1.0 / (10000 ** (tf.range(start=0, limit=self.dim, delta=2, dtype=tf.float32) / self.dim)) ) def _compute_cos_sin(self, x, seq_dimension=2): seq_len = tf.shape(x)[seq_dimension] t = tf.range(seq_len, dtype=self.inv_freq.dtype) freqs = tf.einsum("i, j -> ij", t, self.inv_freq) # Outer multiplication emb = tf.concat((freqs, freqs), axis=-1)[None, None, :, :] return tf.cos(emb), tf.sin(emb) def call(self, q: tf.Tensor, k: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]: cos_emb, sin_emb = self._compute_cos_sin(k, seq_dimension=-2) return ( apply_rotary_pos_emb(q, cos_emb, sin_emb), apply_rotary_pos_emb(k, cos_emb, sin_emb), ) class TFEsmContactPredictionHead(keras.layers.Layer): """Performs symmetrization, apc, and computes a logistic regression on the output features""" def __init__( self, in_features: int, bias=True, eos_idx: int = 2, name=None, ): super().__init__(name=name) self.eos_idx = eos_idx self.in_features = in_features self.regression = keras.layers.Dense(1, use_bias=bias, activation="sigmoid", name="regression") def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "regression", None) is not None: with tf.name_scope(self.regression.name): self.regression.build((None, self.in_features)) def call(self, tokens, attentions): # remove eos token attentions eos_mask = tf.cast(tokens != self.eos_idx, attentions.dtype) eos_mask = tf.expand_dims(eos_mask, 1) * tf.expand_dims(eos_mask, 2) attentions = attentions * eos_mask[:, None, None, :, :] attentions = attentions[..., :-1, :-1] # remove cls token attentions attentions = attentions[..., 1:, 1:] batch_size, layers, heads, seqlen, _ = shape_list(attentions) attentions = tf.reshape(attentions, (batch_size, layers * heads, seqlen, seqlen)) # features: batch x channels x tokens x tokens (symmetric) attentions = average_product_correct(symmetrize(attentions)) attentions = tf.transpose(attentions, perm=(0, 2, 3, 1)) return tf.squeeze(self.regression(attentions), 3) class TFEsmEmbeddings(keras.layers.Layer): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. 
""" def __init__(self, config, name=None): super().__init__(name=name) self.word_embeddings = keras.layers.Embedding( config.vocab_size, config.hidden_size, embeddings_initializer=get_initializer(config.initializer_range), name="word_embeddings", ) self.position_embeddings = keras.layers.Embedding( config.max_position_embeddings, config.hidden_size, embeddings_initializer=get_initializer(config.initializer_range), name="position_embeddings", ) if config.emb_layer_norm_before: self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") else: self.layer_norm = None # Matt: I think this line was copied incorrectly from BERT, disabling for now # self.dropout = Dropout(config.hidden_dropout_prob) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.position_ids = tf.range(config.max_position_embeddings)[None, :] self.padding_idx = config.pad_token_id self.token_dropout = config.token_dropout self.mask_token_id = config.mask_token_id self.config = config def call( self, input_ids=None, attention_mask=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = self.word_embeddings(input_ids) # Note that if we want to support ESM-1 (not 1b!) in future then we need to support an # embedding_scale factor here. embeddings = inputs_embeds # Matt: ESM has the option to handle masking in MLM in a slightly unusual way. If the token_dropout # flag is False then it is handled in the same was as BERT/RoBERTa. If it is set to True, however, # masked tokens are treated as if they were selected for input dropout and zeroed out. # This "mask-dropout" is compensated for when masked tokens are not present, by scaling embeddings by # a factor of (fraction of unmasked tokens during training) / (fraction of unmasked tokens in sample). # This is analogous to the way that dropout layers scale down outputs during evaluation when not # actually dropping out values (or, equivalently, scale up their un-dropped outputs in training). if self.token_dropout: embeddings = tf.where((input_ids == self.mask_token_id)[:, :, None], 0.0, embeddings) mask_ratio_train = 0.15 * 0.8 # Hardcoded as the ratio used in all ESM model training runs src_lengths = tf.cast(tf.reduce_sum(attention_mask, axis=-1), tf.float32) masked_tokens = input_ids == self.mask_token_id mask_ratio_observed = tf.math.count_nonzero(masked_tokens, dtype=tf.float32, axis=-1) / src_lengths embeddings = embeddings * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None] if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings if self.layer_norm is not None: embeddings = self.layer_norm(embeddings) if attention_mask is not None: embeddings = embeddings * tf.cast(tf.expand_dims(attention_mask, -1), embeddings.dtype) # Matt: I think this line was copied incorrectly from BERT, disabling it for now. # embeddings = self.dropout(embeddings) return embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. 
We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: tf.Tensor Returns: tf.Tensor """ input_shape = shape_list(inputs_embeds)[:-1] sequence_length = input_shape[1] position_ids = tf.range( start=self.padding_idx + 1, limit=sequence_length + self.padding_idx + 1, dtype=tf.int64 ) return tf.broadcast_to(tf.expand_dims(position_ids, 0), input_shape) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "word_embeddings", None) is not None: with tf.name_scope(self.word_embeddings.name): self.word_embeddings.build(None) if getattr(self, "position_embeddings", None) is not None: with tf.name_scope(self.position_embeddings.name): self.position_embeddings.build(None) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.hidden_size]) class TFEsmSelfAttention(keras.layers.Layer): def __init__(self, config, position_embedding_type=None, name=None): super().__init__(name=name) if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" ) self.value = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) self.rotary_embeddings = None if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = keras.layers.Embedding( 2 * config.max_position_embeddings - 1, self.attention_head_size, embeddings_initializer=get_initializer(config.initializer_range), ) elif self.position_embedding_type == "rotary": self.rotary_embeddings = TFRotaryEmbedding(dim=self.attention_head_size, name="rotary_embeddings") self.is_decoder = config.is_decoder self.config = config def transpose_for_scores(self, x: tf.Tensor) -> tf.Tensor: new_x_shape = shape_list(x)[:-1] + [self.num_attention_heads, self.attention_head_size] x = tf.reshape(x, new_x_shape) return tf.transpose(x, perm=(0, 2, 1, 3)) def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, encoder_hidden_states: tf.Tensor | None = None, encoder_attention_mask: tf.Tensor | None = None, past_key_value: Tuple[Tuple[tf.Tensor]] | None = None, output_attentions: Optional[bool] = False, training: bool = False, ) -> Tuple[tf.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. 
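        # Four cases are handled below: (1) cross-attention reusing cached encoder
        # key/value states, (2) fresh cross-attention, (3) cached uni-directional
        # self-attention during decoding, and (4) plain self-attention with no cache.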
is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = tf.concat([past_key_value[0], key_layer], axis=2) value_layer = tf.concat([past_key_value[1], value_layer], axis=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) # Matt: Our BERT model (which this code was derived from) scales attention logits down by sqrt(head_dim). # ESM scales the query down by the same factor instead. Modulo numerical stability these are equivalent, # but not when rotary embeddings get involved. Therefore, we scale the query here to match the original # ESM code and fix rotary embeddings. query_layer = query_layer * self.attention_head_size**-0.5 if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) if self.position_embedding_type == "rotary": query_layer, key_layer = self.rotary_embeddings(query_layer, key_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": seq_length = shape_list(hidden_states)[1] position_ids_l = tf.expand_dims(tf.range(seq_length, dtype=tf.int64), -1) position_ids_r = tf.expand_dims(tf.range(seq_length, dtype=tf.int64), 0) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = tf.cast(positional_embedding, query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = tf.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = tf.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = tf.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in EsmModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
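        # (stable_softmax is a drop-in replacement for tf.nn.softmax that works
        # around an XLA-on-CPU issue; see transformers.tf_utils for details.)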
attention_probs = stable_softmax(attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = attention_probs @ value_layer context_layer = tf.transpose(context_layer, perm=(0, 2, 1, 3)) new_context_layer_shape = shape_list(context_layer)[:-2] + [self.all_head_size] context_layer = tf.reshape(context_layer, new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size]) if getattr(self, "rotary_embeddings", None) is not None: with tf.name_scope(self.rotary_embeddings.name): self.rotary_embeddings.build(None) class TFEsmSelfOutput(keras.layers.Layer): def __init__(self, config, name=None): super().__init__(name=name) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.config = config def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states += input_tensor return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFEsmAttention(keras.layers.Layer): def __init__(self, config, name=None): super().__init__(name=name) self.self = TFEsmSelfAttention(config, name="self") self.output_layer = TFEsmSelfOutput(config, name="output") self.pruned_heads = set() self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.config = config def prune_heads(self, heads): raise NotImplementedError def call( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, training=False, ): hidden_states_ln = self.LayerNorm(hidden_states) self_outputs = self.self( hidden_states_ln, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, training, ) attention_output = self.output_layer(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self", None) is not None: with tf.name_scope(self.self.name): self.self.build(None) if getattr(self, "output_layer", None) is not None: with tf.name_scope(self.output_layer.name): self.output_layer.build(None) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): 
self.LayerNorm.build([None, None, self.config.hidden_size]) class TFEsmIntermediate(keras.layers.Layer): def __init__(self, config: EsmConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = tf.nn.gelu(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFEsmOutput(keras.layers.Layer): def __init__(self, config, name=None): super().__init__(name=name) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.config = config def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states += input_tensor return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) class TFEsmLayer(keras.layers.Layer): def __init__(self, config, name=None): super().__init__(name=name) self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = TFEsmAttention(config, name="attention") self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise RuntimeError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = TFEsmAttention(config) self.intermediate = TFEsmIntermediate(config, name="intermediate") self.output_layer = TFEsmOutput(config, name="output") self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.config = config def call( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, training=False, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, training=training, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise AttributeError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated" " with cross-attention layers by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is 
not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, training=training, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layernorm_output = self.LayerNorm(attention_output) intermediate_output = self.intermediate(hidden_states=layernorm_output) layer_output = self.output_layer( hidden_states=intermediate_output, input_tensor=attention_output, training=training ) outputs = (layer_output,) + outputs # add attentions if we output them # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "output_layer", None) is not None: with tf.name_scope(self.output_layer.name): self.output_layer.build(None) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFEsmEncoder(keras.layers.Layer): def __init__(self, config, name=None): super().__init__(name=name) self.config = config self.layer = [TFEsmLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] self.emb_layer_norm_after = keras.layers.LayerNormalization( epsilon=config.layer_norm_eps, name="emb_layer_norm_after" ) def call( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, training=False, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, training, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if self.emb_layer_norm_after: hidden_states = self.emb_layer_norm_after(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return 
TFBaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "emb_layer_norm_after", None) is not None:
            with tf.name_scope(self.emb_layer_norm_after.name):
                self.emb_layer_norm_after.build([None, None, self.config.hidden_size])
        if getattr(self, "layer", None) is not None:
            for layer in self.layer:
                with tf.name_scope(layer.name):
                    layer.build(None)


# Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->Esm
class TFEsmPooler(keras.layers.Layer):
    def __init__(self, config: EsmConfig, **kwargs):
        super().__init__(**kwargs)

        self.dense = keras.layers.Dense(
            units=config.hidden_size,
            kernel_initializer=get_initializer(config.initializer_range),
            activation="tanh",
            name="dense",
        )
        self.config = config

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(inputs=first_token_tensor)

        return pooled_output

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])


class TFEsmPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = EsmConfig
    base_model_prefix = "esm"


ESM_START_DOCSTRING = r"""

    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a Keras [Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a
    regular Keras model and refer to the TF/Keras documentation for all matters related to general usage and behavior.

    Parameters:
        config ([`EsmConfig`]): Model configuration class with all the parameters of the model. Initializing with a
            config file does not load the weights associated with the model, only the configuration. Check out the
            [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

ESM_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`tf.Tensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`tf.Tensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules.
Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ESM Model transformer outputting raw hidden-states without any specific head on top.",
    ESM_START_DOCSTRING,
)
class TFEsmMainLayer(keras.layers.Layer):
    """

    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in [Attention is
    all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder` argument and
    `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
""" _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config, add_pooling_layer=True, name=None, **kwargs): super().__init__(name=name, **kwargs) self.config = config self.is_decoder = config.is_decoder self.embeddings = TFEsmEmbeddings(config, name="embeddings") self.encoder = TFEsmEncoder(config, name="encoder") self.pooler = TFEsmPooler(config, name="pooler") if add_pooling_layer else None self.contact_head = TFEsmContactPredictionHead( in_features=self.config.num_hidden_layers * self.config.num_attention_heads, bias=True, name="contact_head" ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) if getattr(self, "contact_head", None) is not None: with tf.name_scope(self.contact_head.name): self.contact_head.build(None) def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value: tf.Variable): self.embeddings.word_embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): raise NotImplementedError def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]: if not self.config.is_decoder: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape if past_key_values is None: past_key_values_length = 0 past_key_values = [None] * len(self.encoder.layer) else: past_key_values_length = shape_list(past_key_values[0][0])[-2] if attention_mask is None: attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1) embedding_output = self.embeddings( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, training=training, ) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. 
attention_mask_shape = shape_list(attention_mask)

        mask_seq_length = seq_length + past_key_values_length
        # Copied from `modeling_tf_t5.py`
        # Provided a padding mask of dimensions [batch_size, mask_seq_length]
        # - if the model is a decoder, apply a causal mask in addition to the padding mask
        # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
        if self.is_decoder:
            seq_ids = tf.range(mask_seq_length)
            causal_mask = tf.less_equal(
                tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)),
                seq_ids[None, :, None],
            )
            causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype)
            extended_attention_mask = causal_mask * attention_mask[:, None, :]
            attention_mask_shape = shape_list(extended_attention_mask)
            extended_attention_mask = tf.reshape(
                extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2])
            )
            if past_key_values[0] is not None:
                # attention_mask needs to be sliced to the shape `[batch_size, 1, from_seq_length - cached_seq_length, to_seq_length]`
                extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :]
        else:
            extended_attention_mask = tf.reshape(
                attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])
            )

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
        one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
        ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
        extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)

        # Copied from `modeling_tf_t5.py` with -1e9 -> -10000
        if self.is_decoder and encoder_attention_mask is not None:
            # If a 2D or 3D attention mask is provided for the cross-attention,
            # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
            encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype)
            num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask))
            if num_dims_encoder_attention_mask == 3:
                encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
            if num_dims_encoder_attention_mask == 2:
                encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

            # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
            # Cf.
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask, # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2))) encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers encoder_outputs = self.encoder( hidden_states=embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None if not return_dict: return ( sequence_output, pooled_output, ) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) def predict_contacts(self, tokens, attention_mask): attns = self(tokens, attention_mask=attention_mask, return_dict=True, output_attentions=True).attentions attns = tf.stack(attns, axis=1) # Matches the original model layout # In the original model, attentions for padding tokens are completely zeroed out. # This makes no difference most of the time because the other tokens won't attend to them, # but it does for the contact prediction task, which takes attentions as input, # so we have to mimic that here. 
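        # Zero attentions both *to* padded key positions (last axis) and *from*
        # padded query positions (second-to-last axis) before feeding the contact head.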
attention_mask = tf.cast(attention_mask, attns.dtype) attns *= attention_mask[:, None, None, None] attns *= attention_mask[:, None, None, :, None] return self.contact_head(tokens, attns) @add_start_docstrings( "The bare ESM Model transformer outputting raw hidden-states without any specific head on top.", ESM_START_DOCSTRING, ) class TFEsmModel(TFEsmPreTrainedModel): def __init__(self, config: EsmConfig, add_pooling_layer=True, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.esm = TFEsmMainLayer(config, add_pooling_layer=add_pooling_layer, name="esm") @unpack_inputs @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]: r""" encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
Set to `False` during training, `True` during generation """ outputs = self.esm( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def predict_contacts(self, tokens, attention_mask): return self.esm.predict_contacts(tokens, attention_mask) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "esm", None) is not None: with tf.name_scope(self.esm.name): self.esm.build(None) @add_start_docstrings("""ESM Model with a `language modeling` head on top.""", ESM_START_DOCSTRING) class TFEsmForMaskedLM(TFEsmPreTrainedModel, TFMaskedLanguageModelingLoss): _keys_to_ignore_on_load_missing = [r"position_ids"] _keys_to_ignore_on_load_unexpected = [r"pooler"] def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning( "If you want to use `EsmForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." ) self.esm = TFEsmMainLayer(config, add_pooling_layer=False, name="esm") self.lm_head = TFEsmLMHead(config, name="lm_head") if config.tie_word_embeddings: # Ensure word embeddings are built so that we actually have something to tie with tf.name_scope(os.path.join(self._name_scope(), "esm", "embeddings", "word_embeddings")): self.esm.embeddings.word_embeddings.build((None, None)) self.lm_head.decoder = self.esm.embeddings.word_embeddings.weights[0] def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings def get_lm_head(self): return self.lm_head @unpack_inputs @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask="<mask>", ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, labels: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` kwargs (`Dict[str, any]`, *optional*, defaults to `{}`): Used to hide legacy arguments that have been deprecated. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.esm( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: masked_lm_loss = self.hf_compute_loss(labels=labels, logits=prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return TFMaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def predict_contacts(self, tokens, attention_mask): return self.esm.predict_contacts(tokens, attention_mask) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "esm", None) is not None: with tf.name_scope(self.esm.name): self.esm.build(None) if getattr(self, "lm_head", None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build(None) class TFEsmLMHead(keras.layers.Layer): """ESM Head for masked language modeling.""" def __init__(self, config, name=None): super().__init__(name=name) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") if config.tie_word_embeddings: self.decoder = None else: self.decoder = keras.layers.Dense( config.vocab_size, kernel_initializer=get_initializer(config.initializer_range), name="decoder", use_bias=False, ) self.config = config def build(self, input_shape=None): # Separate bias to match the PT model and allow weight cross-loading to work # Put it in the build so it gets the right name when adding it as a weight if self.built: return self.built = True self.bias = self.add_weight("bias", shape=(self.config.vocab_size,), initializer="zeros", trainable=True) if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.hidden_size]) if getattr(self, "decoder", None) is not None and not self.config.tie_word_embeddings: with tf.name_scope(self.decoder.name): self.decoder.build([None, None, self.config.hidden_size]) def get_bias(self): return {"bias": self.bias} def call(self, features): x = self.dense(features) x = tf.nn.gelu(x) x = self.layer_norm(x) # project back to size of vocabulary with bias if self.config.tie_word_embeddings: x = tf.matmul(x, self.decoder, transpose_b=True) + self.bias else: x = self.decoder(x) + self.bias return x @add_start_docstrings( """ ESM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", ESM_START_DOCSTRING, ) class TFEsmForSequenceClassification(TFEsmPreTrainedModel, TFSequenceClassificationLoss): _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.esm = TFEsmMainLayer(config, add_pooling_layer=False, name="esm") self.classifier = TFEsmClassificationHead(config, name="classifier") @unpack_inputs @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, labels: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.esm( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "esm", None) is not None: with tf.name_scope(self.esm.name): self.esm.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build(None) @add_start_docstrings( """ ESM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", ESM_START_DOCSTRING, ) class TFEsmForTokenClassification(TFEsmPreTrainedModel, TFTokenClassificationLoss): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.esm = TFEsmMainLayer(config, add_pooling_layer=False, name="esm") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.classifier = keras.layers.Dense(config.num_labels, name="classifier") self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, labels: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.esm( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output, training=training) logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "esm", None) is not None: with tf.name_scope(self.esm.name): self.esm.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) class TFEsmClassificationHead(keras.layers.Layer): """Head for sentence-level classification tasks.""" def __init__(self, config, name=None): super().__init__(name=name) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.out_proj = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), activation="linear", name="out_proj", ) self.config = config def call(self, features, training=False): x = features[:, 0, :] # take <s> token (equiv. 
to [CLS])
        x = self.dropout(x, training=training)
        x = self.dense(x)
        x = self.dropout(x, training=training)
        x = self.out_proj(x)
        return x

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])
        if getattr(self, "out_proj", None) is not None:
            with tf.name_scope(self.out_proj.name):
                self.out_proj.build([None, None, self.config.hidden_size])


def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    """
    Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
    are ignored. This is modified from fairseq's `utils.make_positions`.

    Args:
        input_ids: tf.Tensor of token ids, shape `(batch_size, sequence_length)`
        padding_idx: int, id of the padding token
        past_key_values_length: int, length of any cached key/value states to offset the positions by

    Returns:
        tf.Tensor of position ids with the same shape as `input_ids`
    """
    # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
    mask = tf.cast(input_ids != padding_idx, tf.int64)
    incremental_indices = (tf.cumsum(mask, axis=1) + past_key_values_length) * mask
    return incremental_indices + padding_idx
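# A minimal worked trace of create_position_ids_from_input_ids (toy values assumed
# for illustration, not taken from the library's tests): with padding_idx = 1 and
# input_ids = [[5, 6, 7, 1, 1]], the padding mask is [[1, 1, 1, 0, 0]], the masked
# cumulative sum gives [[1, 2, 3, 0, 0]], and adding padding_idx yields
# position_ids = [[2, 3, 4, 1, 1]] -- real tokens count up from padding_idx + 1
# while padding positions stay pinned at padding_idx.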
transformers/src/transformers/models/esm/modeling_tf_esm.py/0
{ "file_path": "transformers/src/transformers/models/esm/modeling_tf_esm.py", "repo_id": "transformers", "token_count": 29926 }
363
# coding=utf-8 # Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def rreplace(s, old, new, occurrence): li = s.rsplit(old, occurrence) return new.join(li) def count_parameters(state_dict): # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items()) def upgrade_state_dict(state_dict): upgrade = {} group_keys = ["group_1", "group_2", "group_3", "group_4"] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: key = key.replace(f"{group_key}.", f"{group_key}.group.") if "res_path" in key: key = key.replace("res_path.", "res_path.path.") if key.endswith(".w"): key = rreplace(key, ".w", ".weight", 1) if key.endswith(".b"): key = rreplace(key, ".b", ".bias", 1) upgrade[key] = value.float() return upgrade @torch.no_grad() def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True): """ Copy/paste/tweak model's weights to transformers design. """ from dall_e import Encoder encoder = Encoder() if os.path.exists(checkpoint_path): ckpt = torch.load(checkpoint_path) else: ckpt = torch.hub.load_state_dict_from_url(checkpoint_path) if isinstance(ckpt, Encoder): ckpt = ckpt.state_dict() encoder.load_state_dict(ckpt) if config_path is not None: config = FlavaImageCodebookConfig.from_pretrained(config_path) else: config = FlavaImageCodebookConfig() hf_model = FlavaImageCodebook(config).eval() state_dict = encoder.state_dict() hf_state_dict = upgrade_state_dict(state_dict) hf_model.load_state_dict(hf_state_dict) hf_state_dict = hf_model.state_dict() hf_count = count_parameters(hf_state_dict) state_dict_count = count_parameters(state_dict) assert torch.allclose(hf_count, state_dict_count, atol=1e-3) if save_checkpoint: hf_model.save_pretrained(pytorch_dump_folder_path) else: return hf_state_dict if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") args = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
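# Example invocation (a sketch; the checkpoint URL below is an assumption -- any
# local file or URL holding a DALL-E `Encoder` state dict accepted by
# `torch.hub.load_state_dict_from_url` should work):
#
#   python convert_dalle_to_flava_codebook.py \
#       --checkpoint_path https://cdn.openai.com/dall-e/encoder.pkl \
#       --pytorch_dump_folder_path ./flava_image_codebook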
transformers/src/transformers/models/flava/convert_dalle_to_flava_codebook.py/0
{ "file_path": "transformers/src/transformers/models/flava/convert_dalle_to_flava_codebook.py", "repo_id": "transformers", "token_count": 1300 }
364
# coding=utf-8
# Copyright 2023 HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Fuyu model."""

from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...modeling_outputs import CausalLMOutputWithPast
from ...modeling_utils import PreTrainedModel
from ...models.auto.modeling_auto import AutoModelForCausalLM
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_fuyu import FuyuConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "FuyuConfig"


FUYU_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
    heads, etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`FuyuConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings(
    "The bare Fuyu Model outputting raw hidden-states without any specific head on top.",
    FUYU_START_DOCSTRING,
)
class FuyuPreTrainedModel(PreTrainedModel):
    config_class = FuyuConfig
    base_model_prefix = "fuyu"
    supports_gradient_checkpointing = True
    _no_split_modules = []
    _skip_keys_device_placement = "past_key_values"

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


FUYU_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.
        image_patches (`torch.FloatTensor` of shape `(batch_size, num_total_patches, patch_size x patch_size x num_channels)`, *optional*):
            Image patches to be used as continuous embeddings. The patches are flattened and then projected to the
            hidden size of the model.
        image_patches_indices (`torch.LongTensor` of shape `(batch_size, num_total_patches + number_of_newline_tokens + number_of_text_tokens)`, *optional*):
            Indices indicating at which position the image_patches have to be inserted in `inputs_embeds`.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
            cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
""" @add_start_docstrings( "Fuyu Model with a language modeling head on top for causal language model conditioned on image patches and text.", FUYU_START_DOCSTRING, ) class FuyuForCausalLM(FuyuPreTrainedModel): def __init__(self, config: FuyuConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.text_config.vocab_size self.language_model = AutoModelForCausalLM.from_config( config.text_config, attn_implementation=config._attn_implementation ) self.vision_embed_tokens = nn.Linear( config.patch_size * config.patch_size * config.num_channels, config.hidden_size ) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def get_output_embeddings(self): return self.language_model.get_output_embeddings() def set_output_embeddings(self, new_embeddings): self.language_model.set_output_embeddings(new_embeddings) def set_decoder(self, decoder): self.language_model.set_decoder(decoder) def get_decoder(self): return self.language_model.get_decoder() def tie_weights(self): return self.language_model.tie_weights() def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None) -> nn.Embedding: # TODO: config.vocab_size is deprecated and will be removed in v4.43. # `resize_token_embeddings` should work from `modeling_utils.py`` model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of) self.config.text_config.vocab_size = model_embeds.num_embeddings self.config.vocab_size = model_embeds.num_embeddings self.vocab_size = model_embeds.num_embeddings return model_embeds def gather_continuous_embeddings( self, word_embeddings: torch.Tensor, continuous_embeddings: List[torch.Tensor], image_patch_input_indices: torch.Tensor, ) -> torch.Tensor: """This function places the continuous_embeddings into the word_embeddings at the locations indicated by image_patch_input_indices. Different batch elements can have different numbers of continuous embeddings. Args: word_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Tensor of word embeddings. continuous_embeddings (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`): Tensor of continuous embeddings. The length of the list is the batch size. Each entry is shape [num_image_embeddings, hidden], and num_image_embeddings needs to match the number of non-negative indices in image_patch_input_indices for that batch element. image_patch_input_indices (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Tensor of indices of the image patches in the input_ids tensor. """ if not (word_embeddings.shape[0] == len(continuous_embeddings)): raise ValueError( f"Batch sizes must match! Got {len(continuous_embeddings)=} and {word_embeddings.shape[0]=}" ) output_embeddings = word_embeddings.clone() for batch_idx in range(word_embeddings.shape[0]): # First, find the positions of all the non-negative values in image_patch_input_indices, those are the # positions in word_embeddings that we want to replace with content from continuous_embeddings. dst_indices = torch.nonzero(image_patch_input_indices[batch_idx] >= 0, as_tuple=True)[0] # Next look up those indices in image_patch_input_indices to find the indices in continuous_embeddings that we # want to use to replace the values in word_embeddings. 
            src_indices = image_patch_input_indices[batch_idx][dst_indices]
            # Check if we have more indices than embeddings. Note that we could have fewer indices if images got
            # truncated.
            if src_indices.shape[0] > continuous_embeddings[batch_idx].shape[0]:
                raise ValueError(
                    f"Number of continuous embeddings {continuous_embeddings[batch_idx].shape=} does not match "
                    f"number of continuous token ids {src_indices.shape=} in batch element {batch_idx}."
                )
            output_embeddings[batch_idx, dst_indices] = continuous_embeddings[batch_idx][src_indices]
        return output_embeddings

    @add_start_docstrings_to_model_forward(FUYU_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        image_patches: torch.Tensor = None,  # [batch_size, num_total_patches, patch_size x patch_size x num_channels]
        image_patches_indices: torch.Tensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Examples:

        ```python
        >>> from transformers import FuyuProcessor, FuyuForCausalLM
        >>> from PIL import Image
        >>> import requests

        >>> processor = FuyuProcessor.from_pretrained("adept/fuyu-8b")
        >>> model = FuyuForCausalLM.from_pretrained("adept/fuyu-8b")

        >>> url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> prompt = "Generate a coco-style caption.\n"

        >>> inputs = processor(text=prompt, images=image, return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> generated_ids = model.generate(**inputs, max_new_tokens=7)
        >>> generation_text = processor.batch_decode(generated_ids[:, -7:], skip_special_tokens=True)
        >>> print(generation_text[0])
        A blue bus parked on the side of a road.
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either input_is or inputs_embeds") seq_length_with_past = seq_length past_key_values_length = 0 if past_key_values is not None: past_key_values_length = past_key_values[0][0].shape[2] seq_length_with_past = seq_length_with_past + past_key_values_length if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0) if inputs_embeds is None: inputs_embeds = self.language_model.get_input_embeddings()(input_ids) if image_patches is not None and past_key_values is None: patch_embeddings = [ self.vision_embed_tokens(patch.to(self.vision_embed_tokens.weight.dtype)) .squeeze(0) .to(inputs_embeds.device) for patch in image_patches ] inputs_embeds = self.gather_continuous_embeddings( word_embeddings=inputs_embeds, continuous_embeddings=patch_embeddings, image_patch_input_indices=image_patches_indices, ) outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, labels=labels, use_cache=use_cache, return_dict=return_dict, ) return outputs def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, image_patches=None, image_patches_indices=None, **kwargs, ): if past_key_values: input_ids = input_ids[:, -1:] position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -1].unsqueeze(-1) # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} if image_patches_indices is not None: model_inputs["image_patches_indices"] = image_patches_indices model_inputs.update( { "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, "image_patches_indices": image_patches_indices if past_key_values is None else None, "image_patches": image_patches if past_key_values is None else None, } ) return model_inputs
transformers/src/transformers/models/fuyu/modeling_fuyu.py/0
{ "file_path": "transformers/src/transformers/models/fuyu/modeling_fuyu.py", "repo_id": "transformers", "token_count": 7723 }
365
# coding=utf-8 # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 OpenAI GPT-2 model.""" from __future__ import annotations from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutputWithPastAndCrossAttentions, TFCausalLMOutputWithCrossAttentions, TFSequenceClassifierOutputWithPast, ) from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFConv1D, TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, TFSequenceSummary, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_gpt2 import GPT2Config logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "openai-community/gpt2" _CONFIG_FOR_DOC = "GPT2Config" class TFAttention(keras.layers.Layer): def __init__(self, nx, config, scale=False, is_cross_attention=False, **kwargs): super().__init__(**kwargs) n_state = nx # in Attention: n_state=768 (nx=n_embd) # [switch nx => n_state from Block to Attention to keep identical to TF implementation] assert n_state % config.n_head == 0 self.n_head = config.n_head self.split_size = n_state self.scale = scale self.output_attentions = config.output_attentions self.is_cross_attention = is_cross_attention if self.is_cross_attention: self.c_attn = TFConv1D(n_state * 2, nx, initializer_range=config.initializer_range, name="c_attn") self.q_attn = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="q_attn") else: self.c_attn = TFConv1D(n_state * 3, nx, initializer_range=config.initializer_range, name="c_attn") self.c_proj = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_proj") self.attn_dropout = keras.layers.Dropout(config.attn_pdrop) self.resid_dropout = keras.layers.Dropout(config.resid_pdrop) self.pruned_heads = set() self.embed_dim = n_state def prune_heads(self, heads): pass @staticmethod def causal_attention_mask(nd, ns, dtype): """ 1's in the lower triangle, counting from the lower right corner. Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs. 
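
        A small worked example (illustrative, not from the original docstring):
        `causal_attention_mask(2, 4, tf.float32)` yields `[[1., 1., 1., 0.], [1., 1., 1., 1.]]` --
        with 2 query positions attending over 4 key positions, the earlier query sees the
        first three keys and the final query sees all four.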
""" i = tf.range(nd)[:, None] j = tf.range(ns) m = i >= j - ns + nd return tf.cast(m, dtype) def _attn(self, q, k, v, attention_mask, head_mask, output_attentions, training=False): # q, k, v have shape [batch, heads, sequence, features] w = tf.matmul(q, k, transpose_b=True) if self.scale: dk = tf.cast(shape_list(k)[-1], dtype=w.dtype) # scale attention_scores w = w / tf.math.sqrt(dk) if not self.is_cross_attention: # if only "normal" attention layer implements causal mask # w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst. _, _, nd, ns = shape_list(w) b = self.causal_attention_mask(nd, ns, dtype=w.dtype) b = tf.reshape(b, [1, 1, nd, ns]) w = w * b - 1e4 * (1 - b) if attention_mask is not None: # Apply the attention mask attention_mask = tf.cast(attention_mask, dtype=w.dtype) w = w + attention_mask w = stable_softmax(w, axis=-1) w = self.attn_dropout(w, training=training) # Mask heads if we want to if head_mask is not None: w = w * head_mask outputs = [tf.matmul(w, v)] if output_attentions: outputs.append(w) return outputs def merge_heads(self, x): x = tf.transpose(x, [0, 2, 1, 3]) x_shape = shape_list(x) new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]] return tf.reshape(x, new_x_shape) def split_heads(self, x): x_shape = shape_list(x) new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head] x = tf.reshape(x, new_x_shape) return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features) def call( self, x, layer_past, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, use_cache, output_attentions, training=False, ): if encoder_hidden_states is not None: if not hasattr(self, "q_attn"): raise ValueError( "If class is used as cross attention, the weights `q_attn` have to be defined. " "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`." 
) query = self.q_attn(x) kv_out = self.c_attn(encoder_hidden_states) key, value = tf.split(kv_out, 2, axis=2) attention_mask = encoder_attention_mask else: x = self.c_attn(x) query, key, value = tf.split(x, 3, axis=2) query = self.split_heads(query) key = self.split_heads(key) value = self.split_heads(value) if layer_past is not None: past_key, past_value = tf.unstack(layer_past, axis=0, num=2) key = tf.concat([past_key, key], axis=-2) value = tf.concat([past_value, value], axis=-2) # to cope with keras serialization if use_cache: present = tf.stack([key, value], axis=0) else: present = (None,) attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions, training=training) a = attn_outputs[0] a = self.merge_heads(a) a = self.c_proj(a) a = self.resid_dropout(a, training=training) outputs = [a, present] + attn_outputs[1:] return outputs # a, present, (attentions) def build(self, input_shape=None): if self.built: return self.built = True if self.is_cross_attention: c_attn_shape = 2 * self.embed_dim else: c_attn_shape = 3 * self.embed_dim if getattr(self, "c_proj", None) is not None: with tf.name_scope(self.c_proj.name): self.c_proj.build([None, None, self.embed_dim]) if getattr(self, "c_attn", None) is not None: with tf.name_scope(self.c_attn.name): self.c_attn.build([None, None, c_attn_shape]) if getattr(self, "q_attn", None) is not None: with tf.name_scope(self.q_attn.name): self.q_attn.build([None, None, self.embed_dim]) class TFMLP(keras.layers.Layer): def __init__(self, n_state, config, **kwargs): super().__init__(**kwargs) nx = config.n_embd self.c_fc = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_fc") self.c_proj = TFConv1D(nx, n_state, initializer_range=config.initializer_range, name="c_proj") self.act = get_tf_activation(config.activation_function) self.dropout = keras.layers.Dropout(config.resid_pdrop) self.intermediate_size = n_state self.embed_dim = nx def call(self, x, training=False): h = self.act(self.c_fc(x)) h2 = self.c_proj(h) h2 = self.dropout(h2, training=training) return h2 def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "c_fc", None) is not None: with tf.name_scope(self.c_fc.name): self.c_fc.build([None, None, self.intermediate_size]) if getattr(self, "c_proj", None) is not None: with tf.name_scope(self.c_proj.name): self.c_proj.build([None, None, self.embed_dim]) class TFBlock(keras.layers.Layer): def __init__(self, config, scale=False, **kwargs): super().__init__(**kwargs) nx = config.n_embd inner_dim = config.n_inner if config.n_inner is not None else 4 * nx self.ln_1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_1") self.attn = TFAttention(nx, config, scale, name="attn") self.ln_2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_2") if config.add_cross_attention: self.crossattention = TFAttention(nx, config, scale, name="crossattention", is_cross_attention=True) self.ln_cross_attn = keras.layers.LayerNormalization( epsilon=config.layer_norm_epsilon, name="ln_cross_attn" ) self.mlp = TFMLP(inner_dim, config, name="mlp") self.hidden_size = config.hidden_size def call( self, x, layer_past, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, use_cache, output_attentions, training=False, ): a = self.ln_1(x) output_attn = self.attn( a, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=None, encoder_attention_mask=None, use_cache=use_cache, 
output_attentions=output_attentions, training=training, ) a = output_attn[0] # output_attn: a, present, (attentions) outputs = output_attn[1:] x = x + a # Cross-Attention Block if encoder_hidden_states is not None: # add one self-attention block for cross-attention if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " "cross-attention layers by setting `config.add_cross_attention=True`" ) ca = self.ln_cross_attn(x) output_cross_attn = self.crossattention( ca, layer_past=None, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=False, output_attentions=output_attentions, training=training, ) ca = output_cross_attn[0] # output_attn: a, present, (cross_attentions) x = x + ca outputs = outputs + output_cross_attn[2:] # add cross attentions if we output attention weights m = self.ln_2(x) m = self.mlp(m, training=training) x = x + m outputs = [x] + outputs return outputs # x, present, (attentions, cross_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "ln_1", None) is not None: with tf.name_scope(self.ln_1.name): self.ln_1.build([None, None, self.hidden_size]) if getattr(self, "attn", None) is not None: with tf.name_scope(self.attn.name): self.attn.build(None) if getattr(self, "ln_2", None) is not None: with tf.name_scope(self.ln_2.name): self.ln_2.build([None, None, self.hidden_size]) if getattr(self, "mlp", None) is not None: with tf.name_scope(self.mlp.name): self.mlp.build(None) if getattr(self, "crossattention", None) is not None: with tf.name_scope(self.crossattention.name): self.crossattention.build(None) if getattr(self, "ln_cross_attn", None) is not None: with tf.name_scope(self.ln_cross_attn.name): self.ln_cross_attn.build([None, None, self.hidden_size]) @keras_serializable class TFGPT2MainLayer(keras.layers.Layer): config_class = GPT2Config def __init__(self, config, *inputs, **kwargs): super().__init__(*inputs, **kwargs) self.config = config self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.use_cache = config.use_cache self.return_dict = config.use_return_dict self.num_hidden_layers = config.n_layer self.n_embd = config.n_embd self.n_positions = config.n_positions self.initializer_range = config.initializer_range self.wte = keras.layers.Embedding( input_dim=config.vocab_size, output_dim=config.hidden_size, embeddings_initializer=get_initializer(config.initializer_range), name="wte", ) self.wpe = keras.layers.Embedding( input_dim=config.n_positions, output_dim=config.n_embd, embeddings_initializer=get_initializer(config.initializer_range), name="wpe", ) self.drop = keras.layers.Dropout(config.embd_pdrop) self.h = [TFBlock(config, scale=True, name=f"h_._{i}") for i in range(config.n_layer)] self.ln_f = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_f") self.embed_dim = config.hidden_size def get_input_embeddings(self): return self.wte def set_input_embeddings(self, new_embeddings): self.wte = new_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        raise NotImplementedError

    @unpack_inputs
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
        encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
            input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if past_key_values is None:
            past_length = 0
            past_key_values = [None] * len(self.h)
        else:
            past_length = shape_list(past_key_values[0][0])[-2]

        if position_ids is None:
            position_ids = tf.expand_dims(tf.range(past_length, input_shape[-1] + past_length), axis=0)

        if attention_mask is not None:
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is simpler than the triangular masking of causal attention
            # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
            attention_mask_shape = shape_list(attention_mask)
            attention_mask = tf.reshape(attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1]))

            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and -10000.0 for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            one_cst = tf.constant(1.0)
            attention_mask = tf.cast(attention_mask, dtype=one_cst.dtype)
            attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), tf.constant(-10000.0))

        # Copied from `modeling_tf_t5.py` with -1e9 -> -10000
        if self.config.add_cross_attention and encoder_attention_mask is not None:
            # If a 2D or 3D attention mask is provided for the cross-attention,
            # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
            encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=encoder_hidden_states.dtype)
            num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask))
            if num_dims_encoder_attention_mask == 3:
                encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
            if num_dims_encoder_attention_mask == 2:
                encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

            # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
            # Cf.
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask, # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2))) encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 else: encoder_extended_attention_mask = None encoder_attention_mask = encoder_extended_attention_mask # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.num_hidden_layers # head_mask = tf.constant([0] * self.num_hidden_layers) position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = self.wte(input_ids) position_embeds = self.wpe(position_ids) if token_type_ids is not None: token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]]) token_type_embeds = self.wte(token_type_ids) else: token_type_embeds = tf.constant(0.0) position_embeds = tf.cast(position_embeds, dtype=inputs_embeds.dtype) token_type_embeds = tf.cast(token_type_embeds, dtype=inputs_embeds.dtype) hidden_states = inputs_embeds + position_embeds + token_type_embeds hidden_states = self.drop(hidden_states, training=training) output_shape = input_shape + [shape_list(hidden_states)[-1]] presents = () if use_cache else None all_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None all_hidden_states = () if output_hidden_states else None for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),) outputs = block( hidden_states, layer_past, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask, use_cache, output_attentions, training=training, ) hidden_states, present = outputs[:2] if use_cache: presents = presents + (present,) if output_attentions: all_attentions = all_attentions + (outputs[2],) if self.config.add_cross_attention and encoder_hidden_states is not None: all_cross_attentions = all_cross_attentions + (outputs[3],) hidden_states = self.ln_f(hidden_states) hidden_states = tf.reshape(hidden_states, output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if output_attentions: # let the number of heads free (-1) so we can extract attention even after head pruning attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:] all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions) if not return_dict: return tuple( v for v in [hidden_states, presents, all_hidden_states, all_attentions, all_cross_attentions] if v is not None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "wte", None) is not None: with 
tf.name_scope(self.wte.name):
                self.wte.build(None)
        if getattr(self, "wpe", None) is not None:
            with tf.name_scope(self.wpe.name):
                self.wpe.build(None)
        if getattr(self, "ln_f", None) is not None:
            with tf.name_scope(self.ln_f.name):
                self.ln_f.build([None, None, self.embed_dim])
        if getattr(self, "h", None) is not None:
            for layer in self.h:
                with tf.name_scope(layer.name):
                    layer.build(None)


class TFGPT2PreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = GPT2Config
    base_model_prefix = "transformer"
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
    _keys_to_ignore_on_load_unexpected = [r"h.\d+.attn.bias", r"h.\d+.crossattention.bias"]

    @property
    def input_signature(self):
        # Although GPT-2 supports token_type_ids in theory, in practice they are rarely used, and the implementation
        # means that passing token_type_ids=0 yields different outputs from token_type_ids=None.
        # Therefore, we remove the token_type_ids argument by default, even though it would usually be included.
        return {
            "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
            "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
        }


@dataclass
class TFGPT2DoubleHeadsModelOutput(ModelOutput):
    """
    Base class for outputs of models predicting if two sentences are consecutive or not.

    Args:
        logits (`tf.Tensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        mc_logits (`tf.Tensor` of shape `(batch_size, num_choices)`):
            Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
        past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,
            sequence_length, embed_size_per_head)`).

            Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.
        hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    logits: tf.Tensor = None
    mc_logits: tf.Tensor = None
    past_key_values: List[tf.Tensor] | None = None
    hidden_states: Tuple[tf.Tensor] | None = None
    attentions: Tuple[tf.Tensor] | None = None


GPT2_START_DOCSTRING = r"""

    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
    heads, etc.)
This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`GPT2Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ GPT2_INPUTS_DOCSTRING = r""" Args: input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0].shape[-2]` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only input IDs that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) past_key_values (`List[tf.Tensor]` of length `config.n_layers`): Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see `past_key_values` output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input ids as they have already been computed. attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for `past_key_values`. 
In other words, the `attention_mask` always has to have the length: `len(past_key_values) + len(input_ids)` [What are attention masks?](../glossary#attention-mask) token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). 
""" @add_start_docstrings( "The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.", GPT2_START_DOCSTRING, ) class TFGPT2Model(TFGPT2PreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFGPT2MainLayer(config, name="transformer") @unpack_inputs @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: r""" encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past`). Set to `False` during training, `True` during generation """ outputs = self.transformer( input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) @add_start_docstrings( """ The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). 
""", GPT2_START_DOCSTRING, ) class TFGPT2LMHeadModel(TFGPT2PreTrainedModel, TFCausalLanguageModelingLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFGPT2MainLayer(config, name="transformer") def get_output_embeddings(self): return self.get_input_embeddings() def set_output_embeddings(self, value): self.set_input_embeddings(value) def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs): token_type_ids = kwargs.get("token_type_ids", None) # only last token for inputs_ids if past is defined in kwargs if past_key_values: inputs = tf.expand_dims(inputs[:, -1], -1) if token_type_ids is not None: token_type_ids = tf.expand_dims(token_type_ids[:, -1], -1) position_ids = kwargs.get("position_ids", None) attention_mask = kwargs.get("attention_mask", None) if attention_mask is not None and position_ids is None: position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True) if past_key_values: position_ids = tf.expand_dims(position_ids[:, -1], -1) return { "input_ids": inputs, "attention_mask": attention_mask, "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": use_cache, "token_type_ids": token_type_ids, } @unpack_inputs @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]: r""" encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past`). 
Set to `False` during training, `True` during generation labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the cross entropy classification loss. Indices should be in `[0, ..., config.vocab_size - 1]`. """ transformer_outputs = self.transformer( input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) hidden_states = transformer_outputs[0] logits = tf.matmul(hidden_states, self.transformer.wte.weights, transpose_b=True) loss = None if labels is not None: # shift labels to the left and cut last logit token shifted_logits = logits[:, :-1] labels = labels[:, 1:] loss = self.hf_compute_loss(labels, shifted_logits) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFCausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, cross_attentions=transformer_outputs.cross_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) @add_start_docstrings( """ The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the input embeddings, the classification head takes as input the input of a specified classification token index in the input sequence). """, GPT2_START_DOCSTRING, ) class TFGPT2DoubleHeadsModel(TFGPT2PreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) config.num_labels = 1 self.transformer = TFGPT2MainLayer(config, name="transformer") self.multiple_choice_head = TFSequenceSummary( config, initializer_range=config.initializer_range, name="multiple_choice_head" ) @unpack_inputs @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFGPT2DoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, mc_token_ids: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFGPT2DoubleHeadsModelOutput, Tuple[tf.Tensor]]: r""" mc_token_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, num_choices)`, *optional*, default to index of the last token of the input): Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) - 1]`. 
Return: Examples: ```python >>> import tensorflow as tf >>> from transformers import AutoTokenizer, TFGPT2DoubleHeadsModel >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") >>> model = TFGPT2DoubleHeadsModel.from_pretrained("openai-community/gpt2") >>> # Add a [CLS] to the vocabulary (we should train it also!) >>> num_added_tokens = tokenizer.add_special_tokens({"cls_token": "[CLS]"}) >>> embedding_layer = model.resize_token_embeddings( ... len(tokenizer) ... ) # Update the model embeddings with the new vocabulary size >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"] >>> encoded_choices = [tokenizer.encode(s) for s in choices] >>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices] >>> input_ids = tf.constant(encoded_choices)[None, :] # Batch size: 1, number of choices: 2 >>> mc_token_ids = tf.constant([cls_token_location]) # Batch size: 1 >>> outputs = model(input_ids, mc_token_ids=mc_token_ids) >>> lm_prediction_scores, mc_prediction_scores = outputs[:2] ```""" if input_ids is not None: input_shapes = shape_list(input_ids) else: input_shapes = shape_list(inputs_embeds)[:-1] seq_length = input_shapes[-1] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None transformer_outputs = self.transformer( input_ids=flat_input_ids, past_key_values=past_key_values, attention_mask=flat_attention_mask, token_type_ids=flat_token_type_ids, position_ids=flat_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=None, encoder_attention_mask=None, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) hidden_states = transformer_outputs[0] hidden_states = tf.reshape(hidden_states, input_shapes + shape_list(hidden_states)[-1:]) if return_dict and output_hidden_states: # We do this to match the slightly odd PT behaviour - the final hidden state is reshaped to rank 4 when the # input is rank 3, but all other hidden states remain at rank-3 (with the first 2 dims merged) all_hidden_states = transformer_outputs.hidden_states[:-1] + (hidden_states,) else: all_hidden_states = None lm_logits = tf.matmul(hidden_states, self.transformer.wte.weights, transpose_b=True) mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids, training=training) mc_logits = tf.squeeze(mc_logits, axis=-1) if not return_dict: return (lm_logits, mc_logits) + transformer_outputs[1:] return TFGPT2DoubleHeadsModelOutput( logits=lm_logits, mc_logits=mc_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=all_hidden_states, attentions=transformer_outputs.attentions, ) @property def input_signature(self): return { "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"), "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"), "mc_token_ids": tf.TensorSpec((None, None), tf.int32, name="mc_token_ids"), } def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) if getattr(self, 
"multiple_choice_head", None) is not None: with tf.name_scope(self.multiple_choice_head.name): self.multiple_choice_head.build(None) @add_start_docstrings( """ The GPT2 Model transformer with a sequence classification head on top (linear layer). [`TFGPT2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-1) do. Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). """, GPT2_START_DOCSTRING, ) class TFGPT2ForSequenceClassification(TFGPT2PreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.score = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="score", use_bias=False, ) self.transformer = TFGPT2MainLayer(config, name="transformer") self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint="microsoft/DialogRPT-updown", output_type=TFSequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFSequenceClassifierOutputWithPast, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the cross entropy classification loss. Indices should be in `[0, ..., config.vocab_size - 1]`. """ transformer_outputs = self.transformer( input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) logits_shape = shape_list(logits) in_logits = None if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: sequence_lengths = ( tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1) - 1 ) sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1) in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1) else: sequence_lengths = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) loss = None if labels is not None: assert ( self.config.pad_token_id is not None or logits_shape[0] == 1 ), "Cannot handle batch sizes > 1 if no padding token is defined." if not tf.is_tensor(sequence_lengths): in_logits = logits[0 : logits_shape[0], sequence_lengths] loss = self.hf_compute_loss(tf.reshape(labels, [-1]), tf.reshape(in_logits, [-1, self.num_labels])) pooled_logits = in_logits if in_logits is not None else logits if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "score", None) is not None: with tf.name_scope(self.score.name): self.score.build([None, None, self.config.n_embd]) if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None)
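# --- Illustrative sketch (not part of the original module) -------------------
# Shows how the sequence-classification head above locates the last non-padding
# token per row before pooling logits. `pad_token_id = 0` and the toy batch are
# assumptions made for this demo only.
if __name__ == "__main__":
    import tensorflow as tf

    pad_token_id = 0
    input_ids = tf.constant([[5, 6, 7, 0, 0], [8, 9, 1, 2, 3]])
    # Index of the first pad token, minus one, is the last real token; rows with
    # no padding fall back to the final position.
    sequence_lengths = (
        tf.argmax(tf.cast(tf.math.equal(input_ids, pad_token_id), input_ids.dtype), axis=-1) - 1
    )
    sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1)
    print(sequence_lengths)  # [2 4] -> index 2 in row 0 (before padding), 4 in row 1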
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Grounding DINO model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import verify_backbone_config_arguments
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class GroundingDinoConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`GroundingDinoModel`]. It is used to instantiate
    a Grounding DINO model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Grounding DINO
    [IDEA-Research/grounding-dino-tiny](https://huggingface.co/IDEA-Research/grounding-dino-tiny) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `ResNetConfig()`):
            The configuration of the backbone model.
        backbone (`str`, *optional*):
            Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
            will load the corresponding pretrained weights from the timm or transformers library. If
            `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the
            backbone with random weights.
        use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
            Whether to use pretrained weights for the backbone.
        use_timm_backbone (`bool`, *optional*, defaults to `False`):
            Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
            library.
        backbone_kwargs (`dict`, *optional*):
            Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
            e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
        text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `BertConfig`):
            The config object or dictionary of the text backbone.
        num_queries (`int`, *optional*, defaults to 900):
            Number of object queries, i.e. detection slots. This is the maximal number of objects
            [`GroundingDinoModel`] can detect in a single image.
        encoder_layers (`int`, *optional*, defaults to 6):
            Number of encoder layers.
        encoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in the encoder.
        encoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_layers (`int`, *optional*, defaults to 6):
            Number of decoder layers.
        decoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in the decoder.
        decoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
is_encoder_decoder (`bool`, *optional*, defaults to `True`): Whether the model is used as an encoder/decoder or not. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. d_model (`int`, *optional*, defaults to 256): Dimension of the layers. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. auxiliary_loss (`bool`, *optional*, defaults to `False`): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. position_embedding_type (`str`, *optional*, defaults to `"sine"`): Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`. num_feature_levels (`int`, *optional*, defaults to 4): The number of input feature levels. encoder_n_points (`int`, *optional*, defaults to 4): The number of sampled keys in each feature level for each attention head in the encoder. decoder_n_points (`int`, *optional*, defaults to 4): The number of sampled keys in each feature level for each attention head in the decoder. two_stage (`bool`, *optional*, defaults to `True`): Whether to apply a two-stage deformable DETR, where the region proposals are also generated by a variant of Grounding DINO, which are further fed into the decoder for iterative bounding box refinement. class_cost (`float`, *optional*, defaults to 1.0): Relative weight of the classification error in the Hungarian matching cost. bbox_cost (`float`, *optional*, defaults to 5.0): Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost. giou_cost (`float`, *optional*, defaults to 2.0): Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost. bbox_loss_coefficient (`float`, *optional*, defaults to 5.0): Relative weight of the L1 bounding box loss in the object detection loss. giou_loss_coefficient (`float`, *optional*, defaults to 2.0): Relative weight of the generalized IoU loss in the object detection loss. focal_alpha (`float`, *optional*, defaults to 0.25): Alpha parameter in the focal loss. disable_custom_kernels (`bool`, *optional*, defaults to `False`): Disable the use of custom CUDA and CPU kernels. This option is necessary for the ONNX export, as custom kernels are not supported by PyTorch ONNX export. max_text_len (`int`, *optional*, defaults to 256): The maximum length of the text input. text_enhancer_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the text enhancer. fusion_droppath (`float`, *optional*, defaults to 0.1): The droppath ratio for the fusion module. fusion_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the fusion module. embedding_init_target (`bool`, *optional*, defaults to `True`): Whether to initialize the target with Embedding weights. query_dim (`int`, *optional*, defaults to 4): The dimension of the query vector. decoder_bbox_embed_share (`bool`, *optional*, defaults to `True`): Whether to share the bbox regression head for all decoder layers. 
two_stage_bbox_embed_share (`bool`, *optional*, defaults to `False`): Whether to share the bbox embedding between the two-stage bbox generator and the region proposal generation. positional_embedding_temperature (`float`, *optional*, defaults to 20): The temperature for Sine Positional Embedding that is used together with vision backbone. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. Examples: ```python >>> from transformers import GroundingDinoConfig, GroundingDinoModel >>> # Initializing a Grounding DINO IDEA-Research/grounding-dino-tiny style configuration >>> configuration = GroundingDinoConfig() >>> # Initializing a model (with random weights) from the IDEA-Research/grounding-dino-tiny style configuration >>> model = GroundingDinoModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "grounding-dino" attribute_map = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self, backbone_config=None, backbone=None, use_pretrained_backbone=False, use_timm_backbone=False, backbone_kwargs=None, text_config=None, num_queries=900, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=True, class_cost=1.0, bbox_cost=5.0, giou_cost=2.0, bbox_loss_coefficient=5.0, giou_loss_coefficient=2.0, focal_alpha=0.25, disable_custom_kernels=False, # other parameters max_text_len=256, text_enhancer_dropout=0.0, fusion_droppath=0.1, fusion_dropout=0.0, embedding_init_target=True, query_dim=4, decoder_bbox_embed_share=True, two_stage_bbox_embed_share=False, positional_embedding_temperature=20, init_std=0.02, layer_norm_eps=1e-5, **kwargs, ): if backbone_config is None and backbone is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.") backbone_config = CONFIG_MAPPING["swin"]( window_size=7, image_size=224, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], out_indices=[2, 3, 4], ) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.pop("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) verify_backbone_config_arguments( use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs, ) if text_config is None: text_config = {} logger.info("text_config is None. 
Initializing the text config with default values (`BertConfig`).") self.backbone_config = backbone_config self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.use_timm_backbone = use_timm_backbone self.backbone_kwargs = backbone_kwargs self.num_queries = num_queries self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.auxiliary_loss = auxiliary_loss self.position_embedding_type = position_embedding_type # deformable attributes self.num_feature_levels = num_feature_levels self.encoder_n_points = encoder_n_points self.decoder_n_points = decoder_n_points self.two_stage = two_stage # Hungarian matcher self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost # Loss coefficients self.bbox_loss_coefficient = bbox_loss_coefficient self.giou_loss_coefficient = giou_loss_coefficient self.focal_alpha = focal_alpha self.disable_custom_kernels = disable_custom_kernels # Text backbone if isinstance(text_config, dict): text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "bert" text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) elif text_config is None: text_config = CONFIG_MAPPING["bert"]() self.text_config = text_config self.max_text_len = max_text_len # Text Enhancer self.text_enhancer_dropout = text_enhancer_dropout # Fusion self.fusion_droppath = fusion_droppath self.fusion_dropout = fusion_dropout # Others self.embedding_init_target = embedding_init_target self.query_dim = query_dim self.decoder_bbox_embed_share = decoder_bbox_embed_share self.two_stage_bbox_embed_share = two_stage_bbox_embed_share if two_stage_bbox_embed_share and not decoder_bbox_embed_share: raise ValueError("If two_stage_bbox_embed_share is True, decoder_bbox_embed_share must be True.") self.positional_embedding_temperature = positional_embedding_temperature self.init_std = init_std self.layer_norm_eps = layer_norm_eps super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) @property def num_attention_heads(self) -> int: return self.encoder_attention_heads @property def hidden_size(self) -> int: return self.d_model
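# --- Illustrative sketch (not part of the original module) -------------------
# Plain dicts passed as `backbone_config` / `text_config` are resolved through
# CONFIG_MAPPING in `__init__`; the concrete values below are arbitrary demo
# numbers, not recommended settings.
if __name__ == "__main__":
    config = GroundingDinoConfig(
        backbone_config={"model_type": "swin", "out_indices": [2, 3, 4]},
        text_config={"model_type": "bert", "num_hidden_layers": 6},
        num_queries=300,
    )
    print(type(config.backbone_config).__name__)  # SwinConfig
    print(config.text_config.num_hidden_layers)  # 6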
# coding=utf-8
# Copyright 2024 Meta and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Hiera model."""

import math
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BackboneOutput,
    BaseModelOutput,
    BaseModelOutputWithPooling,
    ImageClassifierOutput,
    ModelOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_hiera import HieraConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "HieraConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/hiera-tiny-224-hf"
_EXPECTED_OUTPUT_SHAPE = [1, 49, 768]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/hiera-tiny-224-in1k-hf"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


@dataclass
class HieraEncoderOutput(ModelOutput):
    """
    Hiera encoder's outputs, with potential hidden states and attentions.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`. These are the unrolled hidden states of the model.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, height, width, hidden_size)`. These are the reshaped and re-rolled hidden states of
            the model.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
""" last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class HieraModelOutput(ModelOutput): """ Hiera model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed): Average pooling of the last layer hidden-state. bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`): Tensor indicating which patches are masked (0) and which are not (1). ids_restore (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Tensor containing the original index of the (shuffled) masked patches. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. These are the unrolled hidden states of the model. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, height, width, hidden_size)`. These are the reshaped and re-rolled hidden states of the model. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: torch.FloatTensor = None pooler_output: Optional[torch.FloatTensor] = None bool_masked_pos: torch.BoolTensor = None ids_restore: torch.LongTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class HieraForImageClassificationOutput(ImageClassifierOutput): """ Hiera image classification outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, `optional`): Loss value for the training task. logits (`torch.FloatTensor` of shape `(batch_size, num_labels)`): Prediction scores of the classification head (logits of the output layer). hidden_states (`tuple(torch.FloatTensor)`, `optional`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. These are the unrolled hidden states of the model. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
attentions (`tuple(torch.FloatTensor)`, `optional`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, `optional`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, height, width, hidden_size)`. These are the reshaped and re-rolled hidden states of the model. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class HieraForPreTrainingOutput(ModelOutput): """ Class for HieraForPreTraining's outputs, with potential hidden states and attentions. Args: loss (`torch.FloatTensor` of shape `(1,)`): Pixel reconstruction loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, patch_size ** 2 * num_channels)`): Pixel reconstruction logits. bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`): Tensor indicating which patches are masked (0) and which are not (1). ids_restore (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Tensor containing the original index of the (shuffled) masked patches. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, height, width, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None bool_masked_pos: torch.BoolTensor = None ids_restore: torch.LongTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None class HieraPatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. 
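
    Example (illustrative shape walk-through; `image_size=(224, 224)`, `patch_stride=(4, 4)` and
    `masked_unit_size=(8, 8)` are the library defaults, assumed here):

    ```python
    >>> height = width = 224
    >>> tokens_per_side = height // 4  # 56 tokens per spatial side after the strided conv
    >>> seq_length = tokens_per_side**2  # 3136 patch tokens
    >>> mask_units = (tokens_per_side // 8) ** 2  # 49 mask units of 8x8 tokens each
    >>> seq_length, mask_units
    (3136, 49)
    ```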
""" def __init__(self, config, is_mae: bool = False): super().__init__() # Support any number of spatial dimensions self.spatial_dims = len(config.patch_size) if self.spatial_dims != 2: raise ValueError(f"The number of dimensions of the input image should be 2, but got {self.spatial_dims}.") self.num_channels = config.num_channels self.image_size = config.image_size[-2:] self.tokens_spatial_shape = [i // s for i, s in zip(config.image_size, config.patch_stride)] self.mask_spatial_shape = [i // s for i, s in zip(self.tokens_spatial_shape, config.masked_unit_size)] self.mask_ratio = config.mask_ratio self.is_mae = is_mae self.projection = nn.Conv2d( self.num_channels, config.embed_dim, kernel_size=config.patch_size, stride=config.patch_stride, padding=config.patch_padding, ) def masked_conv( self, pixel_values: torch.FloatTensor, bool_masked_pos: Optional[torch.BoolTensor] = None ) -> torch.Tensor: """Zero-out the masked regions of the input before conv. Prevents leakage of masked regions when using overlapping kernels. """ if bool_masked_pos is None: return self.projection(pixel_values) target_size = pixel_values.shape[2:] # Reshape bool_masked_pos to (batch_size, 1, mask_unit_height, mask_unit_width) bool_masked_pos = bool_masked_pos.view(pixel_values.shape[0], 1, *self.mask_spatial_shape) bool_masked_pos = nn.functional.interpolate(bool_masked_pos.float(), size=target_size) return self.projection(pixel_values * bool_masked_pos) def random_masking( self, pixel_values: torch.FloatTensor, noise: Optional[torch.FloatTensor] = None ) -> Tuple[torch.BoolTensor, torch.LongTensor]: """ Perform per-sample random masking by per-sample shuffling. Per-sample shuffling is done by argsort random noise. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`) noise (`torch.FloatTensor` of shape `(batch_size, num_mask_units)`, *optional*) which is mainly used for testing purposes to control randomness and maintain the reproducibility """ batch_size = pixel_values.shape[0] # Tokens selected for masking at mask unit level num_windows = math.prod(self.mask_spatial_shape) len_keep = int(num_windows * (1 - self.mask_ratio)) if noise is None: noise = torch.rand(batch_size, num_windows, device=pixel_values.device) # Sort noise for each sample ids_shuffle = torch.argsort(noise, dim=1) # ascend: small is keep, large is remove ids_restore = torch.argsort(ids_shuffle, dim=1).to(pixel_values.device) # Generate the binary bool_masked_pos: 1 is *keep*, 0 is *remove* # Note this is opposite to original MAE bool_masked_pos = torch.zeros([batch_size, num_windows], device=pixel_values.device) bool_masked_pos[:, :len_keep] = 1 # Unshuffle to get the binary bool_masked_pos bool_masked_pos = torch.gather(bool_masked_pos, dim=1, index=ids_restore).bool() return bool_masked_pos, ids_restore def forward( self, pixel_values: torch.FloatTensor, noise: Optional[torch.FloatTensor] = None, ) -> Tuple[torch.Tensor, Optional[torch.BoolTensor], Optional[torch.LongTensor]]: (bool_masked_pos, ids_restore) = ( self.random_masking(pixel_values, noise=noise) if self.is_mae else (None, None) ) embeddings = self.masked_conv(pixel_values, bool_masked_pos) embeddings = embeddings.flatten(2).transpose(2, 1) return embeddings, bool_masked_pos, ids_restore class HieraEmbeddings(nn.Module): """ Construct position and patch embeddings. 
""" def __init__(self, config: HieraConfig, is_mae: bool = False) -> None: super().__init__() self.patch_stride = config.patch_stride tokens_spatial_shape = [i // s for i, s in zip(config.image_size, config.patch_stride)] self.mask_spatial_shape = [i // s for i, s in zip(tokens_spatial_shape, config.masked_unit_size)] self.num_tokens = math.prod(tokens_spatial_shape) self.is_mae = is_mae self.patch_embeddings = HieraPatchEmbeddings(config, is_mae=is_mae) self.position_embeddings = nn.Parameter(torch.zeros(1, self.num_tokens, config.embed_dim)) def interpolate_pos_encoding( self, embeddings: torch.Tensor, pos_embeds: torch.Tensor, height: int, width: int ) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. Adapted from: https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174 """ num_patches = embeddings.shape[1] num_positions = pos_embeds.shape[1] if num_patches == num_positions and height == width: return pos_embeds dim = embeddings.shape[-1] h0 = height // self.patch_stride[0] w0 = width // self.patch_stride[1] # we add a small number to avoid floating point error in the interpolation # see discussion at https://github.com/facebookresearch/dino/issues/8 h0, w0 = h0 + 0.1, w0 + 0.1 pos_embeds = pos_embeds.reshape(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim) pos_embeds = pos_embeds.permute(0, 3, 1, 2) pos_embeds = nn.functional.interpolate( pos_embeds, scale_factor=(h0 / math.sqrt(num_positions), w0 / math.sqrt(num_positions)), mode="bicubic", align_corners=False, ) if int(h0) != pos_embeds.shape[-2] or int(w0) != pos_embeds.shape[-1]: raise ValueError("The interpolated position encoding does not have the right size") pos_embeds = pos_embeds.permute(0, 2, 3, 1).view(1, -1, dim) return pos_embeds def get_position_embedding( self, embeddings: torch.Tensor, height: int, width: int, interpolate_pos_encoding: bool ) -> torch.FloatTensor: position_embeddings = self.position_embeddings position_embeddings = ( self.interpolate_pos_encoding(embeddings, position_embeddings, height, width) if interpolate_pos_encoding else position_embeddings ) return position_embeddings def forward( self, pixel_values: torch.FloatTensor, noise: Optional[torch.FloatTensor] = None, interpolate_pos_encoding: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.BoolTensor], Optional[torch.LongTensor]]: height, width = pixel_values.shape[-2:] embeddings, bool_masked_pos, ids_restore = self.patch_embeddings(pixel_values, noise=noise) embeddings = embeddings + self.get_position_embedding(embeddings, height, width, interpolate_pos_encoding) return embeddings, bool_masked_pos, ids_restore class HieraMaskUnitAttention(nn.Module): """ Computes either Mask Unit or Global Attention. Also is able to perform query pooling. Note: this assumes the tokens have already been flattened and unrolled into mask units. 
""" def __init__( self, hidden_size: int, hidden_size_output: int, num_heads: int, query_stride: int = 1, window_size: int = 0, use_mask_unit_attn: bool = False, ) -> None: super().__init__() self.num_heads = num_heads self.query_stride = query_stride self.hidden_size_output = hidden_size_output self.head_dim = hidden_size_output // num_heads self.scale = (self.head_dim) ** -0.5 self.qkv = nn.Linear(hidden_size, 3 * hidden_size_output) self.proj = nn.Linear(hidden_size_output, hidden_size_output) self.window_size = window_size self.use_mask_unit_attn = use_mask_unit_attn def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.FloatTensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: """Input should be of shape [batch, tokens, channels].""" batch_size, seq_len, _ = hidden_states.shape num_windows = 1 if self.use_mask_unit_attn: num_windows = seq_len // (self.query_stride * self.window_size) qkv = self.qkv(hidden_states) qkv = qkv.reshape(batch_size, -1, num_windows, 3, self.num_heads, self.head_dim) qkv = qkv.permute(3, 0, 4, 2, 1, 5) query, key, value = qkv.unbind(0) if self.query_stride > 1: # Refer to unroll to see how this performs a maxpool-Nd query = query.view(batch_size, self.num_heads, num_windows, self.query_stride, -1, self.head_dim) query = query.max(dim=3).values attn_weights = (query * self.scale) @ key.transpose(-1, -2) attn_weights = attn_weights.softmax(dim=-1) # Mask heads if we want to if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = attn_weights @ value attn_output = attn_output.transpose(1, 3).reshape(batch_size, -1, self.hidden_size_output) attn_output = self.proj(attn_output) return (attn_output, attn_weights) if output_attentions else (attn_output, None) # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. 
""" if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->Hiera class HieraDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return "p={}".format(self.drop_prob) class HieraMlp(nn.Module): def __init__(self, config, dim: int) -> None: super().__init__() self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(dim, int(dim * config.mlp_ratio)) self.fc2 = nn.Linear(int(dim * config.mlp_ratio), dim) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class HieraLayer(nn.Module): def __init__( self, config, hidden_size: int, hidden_size_output: int, num_heads: int, drop_path: float = 0.0, query_stride: int = 1, window_size: int = 0, use_mask_unit_attn: bool = False, ) -> None: super().__init__() self.hidden_size = hidden_size self.hidden_size_output = hidden_size_output self.query_stride = query_stride self.layernorm_before = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) self.attn = HieraMaskUnitAttention( hidden_size=hidden_size, hidden_size_output=hidden_size_output, num_heads=num_heads, query_stride=query_stride, window_size=window_size, use_mask_unit_attn=use_mask_unit_attn, ) self.layernorm_after = nn.LayerNorm(hidden_size_output, eps=config.layer_norm_eps) self.mlp = HieraMlp(config, hidden_size_output) self.drop_path = HieraDropPath(drop_path) if drop_path > 0 else nn.Identity() if hidden_size != hidden_size_output: self.proj = nn.Linear(hidden_size, hidden_size_output) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.FloatTensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: batch_size, seq_len, _ = hidden_states.shape # Attention + Q Pooling hidden_states_norm = self.layernorm_before(hidden_states) if self.hidden_size != self.hidden_size_output: hidden_states = self.proj(hidden_states_norm) # Refer to unroll to see how this performs a maxpool-Nd hidden_states = ( hidden_states.view(batch_size, self.query_stride, -1, self.hidden_size_output).max(dim=1).values ) (hidden_states_norm, attn_weights) = self.attn( hidden_states_norm, head_mask, output_attentions=output_attentions ) hidden_states = hidden_states + self.drop_path(hidden_states_norm) residual = hidden_states hidden_states = self.layernorm_after(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + self.drop_path(hidden_states) return (hidden_states, attn_weights) class HieraStage(nn.Module): def __init__( self, config, depth: int, hidden_size: int, hidden_size_output: int, num_heads: int, drop_path: List[float], query_stride: List[int], window_size: int, use_mask_unit_attn: bool, stage_num: Optional[int] = None, ) -> None: super().__init__() # we need to 
know if the previous stage used masked attention # mask unit or global attention. # lag by 1 layer, so that global attention, # applied post pooling on lower resolution previous_stage_used_masked_attention = False if stage_num is not None: previous_stage_used_masked_attention = config.masked_unit_attention[stage_num - 1 if stage_num > 0 else 0] self.layers = nn.ModuleList( [ HieraLayer( config=config, hidden_size=hidden_size if i == 0 else hidden_size_output, hidden_size_output=hidden_size_output, num_heads=num_heads, drop_path=drop_path[i], query_stride=query_stride[i], window_size=window_size, use_mask_unit_attn=use_mask_unit_attn or (previous_stage_used_masked_attention and i == 0), ) for i in range(depth) ] ) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.FloatTensor], output_attentions: bool = False ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: for i, layer_module in enumerate(self.layers): layer_head_mask = head_mask[i] if head_mask is not None else None (hidden_states, attn_weights) = layer_module( hidden_states, layer_head_mask, output_attentions=output_attentions ) return hidden_states, attn_weights def undo_windowing(hidden_states: torch.Tensor, shape: List[int], mask_unit_shape: List[int]) -> torch.Tensor: """ Restore spatial organization by undoing windowed organization of mask units. Args: hidden_states (`torch.Tensor`): The hidden states tensor of shape `[batch_size, num_mask_unit_height*num_mask_unit_width, hidden_size]`. shape (`List[int]`): The original shape of the hidden states tensor before windowing. mask_unit_shape (`List[int]`): The shape of the mask units used for windowing. Returns: torch.Tensor: The restored hidden states tensor of shape [batch_size, num_mask_unit_height*mask_unit_height, num_mask_unit_width*mask_unit_width, hidden_size]. 
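
    Example (illustrative sizes; a 7x7 grid of 8x8 mask units with `hidden_size=96`, matching
    the default first stage, is an assumption for this sketch):

    ```python
    >>> shape, mask_unit_shape = [56, 56], [8, 8]
    >>> [s // mu for s, mu in zip(shape, mask_unit_shape)]  # mask units per side
    [7, 7]
    >>> # 49 windowed mask units of 8x8 tokens are restored to a contiguous
    >>> # [batch_size, 56, 56, 96] feature map
    ```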
""" batch_size, hidden_size = hidden_states.shape[0], hidden_states.shape[-1] # From: [batch_size, num_mask_unit_height*num_mask_unit_width, hidden_size] # To: [batch_size, num_mask_unit_height, num_mask_unit_width, mask_unit_height, mask_unit_width, hidden_size] num_mask_units = [s // mu for s, mu in zip(shape, mask_unit_shape)] hidden_states = hidden_states.view(batch_size, *num_mask_units, *mask_unit_shape, hidden_size) # From: [batch_size, num_mask_unit_height, num_mask_unit_width, mask_unit_height, mask_unit_width, hidden_size] # To: [batch_size, num_mask_unit_height*mask_unit_height, num_mask_unit_width*mask_unit_width, hidden_size] hidden_states = hidden_states.permute(0, 1, 3, 2, 4, 5) hidden_states = hidden_states.reshape(batch_size, *shape, hidden_size) return hidden_states class HieraEncoder(nn.Module): def __init__(self, config: HieraConfig) -> None: super().__init__() total_depth = sum(config.depths) # stochastic depth decay rule dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, total_depth)] # query strides rule cumulative_depths = torch.tensor(config.depths).cumsum(0).tolist() query_pool_layer = cumulative_depths[: config.num_query_pool] query_strides = [math.prod(config.query_stride) if i in query_pool_layer else 1 for i in range(total_depth)] # Transformer blocks self.stages = nn.ModuleList() hidden_size = config.embed_dim stage_ends = [0] + cumulative_depths masked_unit_area = math.prod(config.masked_unit_size) query_stride_area = math.prod(config.query_stride) for idx_stage, depth in enumerate(config.depths): hidden_size_output = int(config.embed_dim * config.embed_dim_multiplier**idx_stage) stage = HieraStage( config=config, depth=depth, hidden_size=hidden_size, hidden_size_output=hidden_size_output, num_heads=config.num_heads[idx_stage], drop_path=dpr[stage_ends[idx_stage] : stage_ends[idx_stage + 1]], query_stride=query_strides[stage_ends[idx_stage] : stage_ends[idx_stage + 1]], window_size=int(masked_unit_area * query_stride_area**-idx_stage), use_mask_unit_attn=config.masked_unit_attention[idx_stage], stage_num=idx_stage, ) hidden_size = hidden_size_output self.stages.append(stage) # Setting reroll schedule # The first stage has to reverse everything # The next stage has to reverse all but the first unroll, etc. stage_size = [i // s for i, s in zip(config.image_size, config.patch_stride)] unroll_schedule = [config.query_stride] * len(config.depths[:-1]) self.schedule = {} for idx_stage in range(len(config.depths)): self.schedule[idx_stage] = unroll_schedule, stage_size if idx_stage < config.num_query_pool: stage_size = [i // s for i, s in zip(stage_size, config.query_stride)] unroll_schedule = unroll_schedule[1:] self.gradient_checkpointing = False def reroll( self, hidden_states: torch.Tensor, stage_idx: int, bool_masked_pos: Optional[torch.BoolTensor] = None ) -> torch.Tensor: """ Roll the given tensor back up to spatial order assuming it's from the given block. 
If no bool_masked_pos is provided returns: - [batch_size, height, width, hidden_size] If a bool_masked_pos is provided returns: - [batch_size, num_mask_units, mask_unit_height, mask_unit_width, hidden_size] """ schedule, size = self.schedule[stage_idx] batch_size, seq_len, hidden_size = hidden_states.shape num_dim = len(size) mask_unit_shape = [1] * num_dim for strides in schedule: # Extract the current patch from seq_len hidden_states = hidden_states.view( batch_size, *strides, seq_len // math.prod(strides), *mask_unit_shape, hidden_size ) # Move that patch into the current MU # Input: [batch_size, stride, stride, seq_len//(stride*stride), mask_unit_height, mask_unit_width, hidden_size] # Output: [batch_size, seq_len//(stride*stride), stride, mask_unit_height, stride, mask_unit_width, hidden_size] hidden_states = hidden_states.permute(0, 3, 1, 4, 2, 5, 6) # Reshape to [batch_size, seq_len//(stride*stride), *mask_units, hidden_size] for i in range(num_dim): mask_unit_shape[i] *= strides[i] hidden_states = hidden_states.reshape(batch_size, -1, *mask_unit_shape, hidden_size) seq_len = hidden_states.shape[1] # Current shape (e.g., 2d: [batch_size, #num_mask_units_height*#num_mask_units_width, mask_unit_height, mask_unit_width, hidden_size]) hidden_states = hidden_states.view(batch_size, seq_len, *mask_unit_shape, hidden_size) # If masked, return [batch_size, num_mask_units, mask_unit_height, mask_unit_width, hidden_size] if bool_masked_pos is not None: return hidden_states # If not masked, we can return [batch_size, height, width, hidden_size] hidden_states = undo_windowing(hidden_states, size, mask_unit_shape) return hidden_states def forward( self, hidden_states: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_reshaped_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) reshaped_hidden_states = self.reroll(hidden_states, stage_idx=0, bool_masked_pos=bool_masked_pos) all_reshaped_hidden_states = all_reshaped_hidden_states + (reshaped_hidden_states,) for i, stage_module in enumerate(self.stages): layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( stage_module.__call__, hidden_states, layer_head_mask, output_attentions ) else: layer_outputs = stage_module(hidden_states, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) reshaped_hidden_states = self.reroll(hidden_states, stage_idx=i, bool_masked_pos=bool_masked_pos) all_reshaped_hidden_states = all_reshaped_hidden_states + (reshaped_hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, all_hidden_states, all_self_attentions, all_reshaped_hidden_states] if v is not None ) return HieraEncoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, reshaped_hidden_states=all_reshaped_hidden_states, ) def unroll( hidden_states: torch.Tensor, image_shape: Tuple[int, int], patch_stride: 
Tuple[int, int], schedule: List[List[int]] ) -> torch.Tensor: """ Reorders the tokens such that patches are contiguous in memory. E.g., given [batch_size, (height, width), hidden_size] and stride of (stride, stride), this will re-order the tokens as [batch_size, (stride, stride, height // stride, width // stride), hidden_size] This allows operations like Max2d to be computed as x.view(batch_size, stride*stride, -1, hidden_size).max(dim=1). Not only is this faster, but it also makes it easy to support inputs of arbitrary dimensions in addition to patch-wise sparsity. Performing this operation multiple times in sequence puts entire windows as contiguous in memory. For instance, if you applied the stride (2, 2) 3 times, entire windows of size 8x8 would be contiguous in memory, allowing operations like mask unit attention computed easily and efficiently, while also allowing max to be applied sequentially. Note: This means that intermediate values of the model are not in height x width order, so they need to be re-rolled if you want to use the intermediate values as a height x width feature map. The last block of the network is fine though, since by then the strides are all consumed. """ batch_size, _, hidden_size = hidden_states.shape size = [i // s for i, s in zip(image_shape, patch_stride)] current_size = size hidden_states = hidden_states.view(*([batch_size] + current_size + [hidden_size])) for strides in schedule: # Move patches with the given strides to the batch dimension # Create a view of the tensor with the patch stride as separate dims # For example in 2d: [batch_size, height // stride, stride, width // stride, stride, C] current_size = [i // s for i, s in zip(current_size, strides)] # initialize new_shape with [height // stride, stride, width // stride, stride] new_shape = [item for pair in zip(current_size, strides) for item in pair] # add batch_size and hidden_size to new_shape new_shape = [batch_size] + new_shape + [hidden_size] hidden_states = hidden_states.view(new_shape) # Move the patch stride into the batch dimension # For example in 2d: [batch_size, stride, stride, height // stride, width // stride, hidden_size] num_dims = len(new_shape) permute = [0] + list(range(2, num_dims - 1, 2)) + list(range(1, num_dims - 1, 2)) + [num_dims - 1] hidden_states = hidden_states.permute(permute) # Now finally flatten the relevant dims into the batch dimension hidden_states = hidden_states.flatten(0, len(strides)) batch_size *= math.prod(strides) hidden_states = hidden_states.reshape(-1, math.prod(size), hidden_size) return hidden_states class HieraPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = HieraConfig base_model_prefix = "hiera" main_input_name = "pixel_values" supports_gradient_checkpointing = True def _init_weights(self, module) -> None: """Initialize the weights""" std = self.config.initializer_range if isinstance(module, HieraEmbeddings): nn.init.trunc_normal_(module.position_embeddings, std=std) elif isinstance(module, HieraDecoder): nn.init.trunc_normal_(module.mask_token, std=std) nn.init.trunc_normal_(module.decoder_position_embeddings, std=std) elif isinstance(module, (nn.Linear, nn.Conv1d, nn.Conv2d)): nn.init.trunc_normal_(module.weight, std=std) if module.bias is not None: nn.init.constant_(module.bias, std) elif isinstance(module, nn.LayerNorm): nn.init.constant_(module.bias, std) nn.init.constant_(module.weight, self.config.layer_norm_init) HIERA_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`HieraConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ HIERA_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`BitImageProcessor.__call__`] for details. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. interpolate_pos_encoding (`bool`, *optional*): Whether to interpolate the pre-trained position encodings. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class HieraPooler(nn.Module): def __init__(self, config: HieraConfig): super().__init__() num_features = int(config.embed_dim * config.embed_dim_multiplier ** (len(config.depths) - 1)) self.layernorm = nn.LayerNorm(num_features, eps=config.layer_norm_eps) self.pooler = nn.AdaptiveAvgPool1d(1) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = hidden_states.transpose(1, 2) pooled_output = self.pooler(hidden_states) pooled_output = torch.flatten(pooled_output, 1) pooled_output = self.layernorm(pooled_output) return pooled_output @add_start_docstrings( "The bare Hiera Model transformer outputting raw hidden-states without any specific head on top.", HIERA_START_DOCSTRING, """ add_pooling_layer (`bool`, *optional*, defaults to `True`): Whether or not to apply pooling layer. is_mae (`bool`, *optional*, defaults to `False`): Whether or not to run the model on MAE mode. 
""", ) class HieraModel(HieraPreTrainedModel): def __init__(self, config: HieraConfig, add_pooling_layer: bool = True, is_mae: bool = False): super().__init__(config) self.num_features = int(config.embed_dim * config.embed_dim_multiplier ** (len(config.depths) - 1)) self.embeddings = HieraEmbeddings(config, is_mae=is_mae) self.encoder = HieraEncoder(config) self.unroll_schedule = [config.query_stride] * len(config.depths[:-1]) self.pooler = HieraPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> HieraPatchEmbeddings: return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(HIERA_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=HieraModelOutput, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Optional[torch.Tensor] = None, noise: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" noise (`torch.FloatTensor` of shape `(batch_size, num_mask_units)`, *optional*) which is mainly used for testing purposes to control randomness and maintain the reproducibility when is_mae is set to True. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, len(self.config.depths)) embedding_output, bool_masked_pos, ids_restore = self.embeddings( pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, noise=noise ) image_shape = (pixel_values.shape[-2], pixel_values.shape[-1]) hidden_states = unroll( embedding_output, image_shape=image_shape, patch_stride=self.config.patch_stride, schedule=self.unroll_schedule, ) # Discard masked tokens if bool_masked_pos is provided if bool_masked_pos is not None: mask_unit_area = math.prod(self.config.masked_unit_size) batch_size, _, hidden_size = hidden_states.shape positions = bool_masked_pos.unsqueeze(-1).tile(1, mask_unit_area, hidden_size) hidden_states = hidden_states[positions] hidden_states = hidden_states.view(batch_size, -1, hidden_size) encoder_outputs = self.encoder( hidden_states, bool_masked_pos=bool_masked_pos, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = None if self.pooler is not None: pooled_output = self.pooler(sequence_output) if not return_dict: head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,) head_outputs = ( head_outputs + (bool_masked_pos, ids_restore) if bool_masked_pos is not None else head_outputs ) return head_outputs + encoder_outputs[1:] return HieraModelOutput( last_hidden_state=sequence_output, pooler_output=pooled_output, bool_masked_pos=bool_masked_pos, ids_restore=ids_restore, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states, ) class HieraDecoder(nn.Module): def __init__(self, config: HieraConfig): super().__init__() num_features = int(config.embed_dim * config.embed_dim_multiplier ** (len(config.depths) - 1)) tokens_spatial_shape = [i // s for i, s in zip(config.image_size, config.patch_stride)] self.tokens_spatial_shape_final = [ i // s ** (config.num_query_pool) for i, s in zip(tokens_spatial_shape, config.query_stride) ] self.mask_unit_spatial_shape_final = [ i // s ** (config.num_query_pool) for i, s in zip(config.masked_unit_size, config.query_stride) ] self.decoder_embeddings = nn.Linear(num_features, config.decoder_hidden_size) self.mask_token = nn.Parameter(torch.zeros(1, 1, config.decoder_hidden_size)) self.decoder_position_embeddings = nn.Parameter( torch.zeros(1, math.prod(self.tokens_spatial_shape_final), config.decoder_hidden_size) ) self.decoder_block = HieraStage( config=config, hidden_size=config.decoder_hidden_size, hidden_size_output=config.decoder_hidden_size, num_heads=config.decoder_num_heads, depth=config.decoder_depth, use_mask_unit_attn=False, drop_path=[0.0] * config.decoder_depth, query_stride=[1] * config.decoder_depth, 
window_size=0, ) self.decoder_norm = nn.LayerNorm(config.decoder_hidden_size, eps=config.layer_norm_eps) # patch stride of prediction self.pred_stride = config.patch_stride[-1] * (config.query_stride[-1] ** config.num_query_pool) pred_dim = (self.pred_stride ** len(config.query_stride)) * config.num_channels self.decoder_pred = nn.Linear(config.decoder_hidden_size, pred_dim) def forward( self, encoder_hidden_states: torch.Tensor, bool_masked_pos: torch.BoolTensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, torch.BoolTensor]: # Embed tokens hidden_states = self.decoder_embeddings(encoder_hidden_states) # Combine visible and bool_masked_pos tokens # hidden_states : [batch_size, num_mask_units_visible, *mask_unit_spatial_shape_final, decoder_hidden_size] # bool_masked_pos: [batch_size, num_mask_units] mask_unit_height, mask_unit_width, decoder_hidden_size = hidden_states.shape[2:] batch_size, num_mask_units = bool_masked_pos.shape decoder_hidden_states = torch.zeros( batch_size, num_mask_units, mask_unit_height, mask_unit_width, decoder_hidden_size, device=hidden_states.device, dtype=hidden_states.dtype, ) mask_tokens = self.mask_token.view(1, 1, 1, 1, -1) bool_masked_pos = bool_masked_pos.reshape(batch_size, num_mask_units, 1, 1, 1) bool_masked_pos = bool_masked_pos.expand(-1, -1, mask_unit_height, mask_unit_width, decoder_hidden_size) decoder_hidden_states[bool_masked_pos] = hidden_states.flatten() decoder_hidden_states = ( 1 - bool_masked_pos.float() ) * mask_tokens + bool_masked_pos.float() * decoder_hidden_states # Get back spatial order hidden_states = undo_windowing( decoder_hidden_states, self.tokens_spatial_shape_final, self.mask_unit_spatial_shape_final, ) bool_masked_pos = undo_windowing( bool_masked_pos[..., 0:1], self.tokens_spatial_shape_final, self.mask_unit_spatial_shape_final, ) # Flatten hidden_states = hidden_states.reshape(hidden_states.shape[0], -1, hidden_states.shape[-1]) bool_masked_pos = bool_masked_pos.view(hidden_states.shape[0], -1) # Add pos embed hidden_states = hidden_states + self.decoder_position_embeddings # Apply decoder blocks hidden_states, attn_weights = self.decoder_block( hidden_states, head_mask=head_mask, output_attentions=output_attentions ) hidden_states = self.decoder_norm(hidden_states) # Predictor projection hidden_states = self.decoder_pred(hidden_states) return hidden_states, bool_masked_pos class HieraMultiScaleHead(nn.Module): def __init__(self, config: HieraConfig): super().__init__() self.mask_unit_spatial_shape_final = [ i // s ** (config.num_query_pool) for i, s in zip(config.masked_unit_size, config.query_stride) ] self.stage_dimensions = [ int(config.embed_dim * config.embed_dim_multiplier**i) for i in range(len(config.depths)) ] current_masked_unit_size = config.masked_unit_size self.multi_scale_fusion_heads = nn.ModuleList() for idx in range(config.num_query_pool): kernel = [i // s for i, s in zip(current_masked_unit_size, self.mask_unit_spatial_shape_final)] current_masked_unit_size = [i // s for i, s in zip(current_masked_unit_size, config.query_stride)] self.multi_scale_fusion_heads.append( nn.Conv2d( self.stage_dimensions[idx], self.stage_dimensions[-1], kernel_size=kernel, stride=kernel, ) ) self.multi_scale_fusion_heads.append(nn.Identity()) def apply_fusion_head(self, head: nn.Module, hidden_states: torch.Tensor) -> torch.Tensor: if isinstance(head, nn.Identity): return hidden_states # Doing explicit to avoid problems with torch.fx batch_size, num_mask_units, 
mask_unit_height, mask_unit_width, hidden_size = hidden_states.shape # From: [batch_size, num_mask_units, mask_unit_height, mask_unit_width, hidden_size] # To: head([batch_size * num_mask_units, hidden_size, mask_unit_height, mask_unit_width]) hidden_states = hidden_states.reshape( batch_size * num_mask_units, mask_unit_height, mask_unit_width, hidden_size ) hidden_states = hidden_states.permute(0, 3, 1, 2) hidden_states = head(hidden_states) # Restore original layout hidden_states = hidden_states.permute(0, 2, 3, 1) mask_unit_height_final, mask_unit_width_final, hidden_size = hidden_states.shape[1:] hidden_states = hidden_states.reshape( batch_size, num_mask_units, mask_unit_height_final, mask_unit_width_final, hidden_size ) return hidden_states def forward(self, feature_maps: List[torch.Tensor]) -> torch.Tensor: # Multi-scale fusion hidden_states = 0.0 for head, feature_map in zip(self.multi_scale_fusion_heads, feature_maps): hidden_states = hidden_states + self.apply_fusion_head(head, feature_map) return hidden_states @add_start_docstrings( """The Hiera Model transformer with the decoder on top for self-supervised pre-training. <Tip> Note that we provide a script to pre-train this model on custom data in our [examples directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining). </Tip> """, HIERA_START_DOCSTRING, ) class HieraForPreTraining(HieraPreTrainedModel): def __init__(self, config: HieraConfig) -> None: super().__init__(config) # Encoder self.hiera = HieraModel(config, add_pooling_layer=False, is_mae=True) self.encoder_norm = nn.LayerNorm(self.hiera.num_features, eps=config.layer_norm_eps) # Multi-scale fusion heads self.multiscale_fusion = HieraMultiScaleHead(config) # Decoder self.decoder = HieraDecoder(config) self.pred_stride = self.decoder.pred_stride # Initialize weights and apply final processing self.post_init() def get_pixel_label_2d(self, pixel_values: torch.Tensor, bool_masked_pos: torch.BoolTensor) -> torch.Tensor: # bool_masked_pos (boolean tensor): True means *masked* pixel_values = pixel_values.permute(0, 2, 3, 1) size = self.pred_stride label = pixel_values.unfold(1, size, size).unfold(2, size, size) label = label.flatten(1, 2).flatten(2) label = label[bool_masked_pos] if self.config.normalize_pixel_loss: mean = label.mean(dim=-1, keepdim=True) var = label.var(dim=-1, keepdim=True) label = (label - mean) / (var + 1.0e-6) ** 0.5 return label def forward_loss(self, pixel_values: torch.Tensor, logits: torch.Tensor, bool_masked_pos: torch.BoolTensor): # We invert the bool_masked_pos such that 1.0 is *masked* bool_masked_pos = ~bool_masked_pos label = self.get_pixel_label_2d(pixel_values, bool_masked_pos) logits = logits[bool_masked_pos] loss = (logits - label) ** 2 loss = loss.mean() return loss @add_start_docstrings_to_model_forward(HIERA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=HieraForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Optional[torch.Tensor] = None, noise: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, HieraForPreTrainingOutput]: r""" noise (`torch.FloatTensor` of shape `(batch_size, num_mask_units)`, *optional*) which is mainly used for testing purposes to control randomness and maintain the reproducibility when is_mae is set to True. 
Returns: Examples: ```python >>> from transformers import AutoImageProcessor, HieraForPreTraining >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("facebook/hiera-tiny-224-mae-hf") >>> model = HieraForPreTraining.from_pretrained("facebook/hiera-tiny-224-mae-hf") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> loss = outputs.loss >>> print(list(logits.shape)) [1, 196, 768] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) outputs = self.hiera( pixel_values, noise=noise, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=True, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) feature_maps = outputs[-1] bool_masked_pos = outputs[1] ids_to_restore = outputs[2] # Take only the query pooled and last hidden states feature_maps = feature_maps[1 : self.hiera.config.num_query_pool + 1] + (feature_maps[-1],) fused_hidden_states = self.multiscale_fusion(feature_maps) fused_hidden_states = self.encoder_norm(fused_hidden_states) # Reconstruct pixel values logits, bool_masked_pos = self.decoder( fused_hidden_states, bool_masked_pos=bool_masked_pos, head_mask=head_mask, output_attentions=output_attentions, ) loss = self.forward_loss(pixel_values, logits, bool_masked_pos) if not return_dict: output = (logits, bool_masked_pos, ids_to_restore) if output_hidden_states: output = output + (outputs[3],) if output_attentions: output = output + (outputs[4],) if output_hidden_states: output = output + (outputs[-1],) return ((loss,) + output) if loss is not None else output return HieraForPreTrainingOutput( loss=loss, logits=logits, bool_masked_pos=bool_masked_pos, ids_restore=ids_to_restore, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states if output_hidden_states else None, ) @add_start_docstrings( """ Hiera Model transformer with an image classification head on top (a linear layer on top of the final hidden state with average pooling) e.g. for ImageNet. <Tip> Note that it's possible to fine-tune Hiera on higher resolution images than the ones it has been trained on, by setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained position embeddings to the higher resolution. 
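
    For example (an illustrative sketch; `model` and the higher-resolution `pixel_values` are assumptions for
    demonstration):

    ```python
    >>> # pixel_values prepared at a higher resolution than pre-training, e.g. 384x384
    >>> outputs = model(pixel_values, interpolate_pos_encoding=True)
    ```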
</Tip> """, HIERA_START_DOCSTRING, ) class HieraForImageClassification(HieraPreTrainedModel): def __init__(self, config: HieraConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.hiera = HieraModel(config, add_pooling_layer=True, is_mae=False) # Classifier head self.classifier = ( nn.Linear(self.hiera.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(HIERA_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=HieraForImageClassificationOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values, head_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, HieraForImageClassificationOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) outputs = self.hiera( pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) pooled_output = outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return HieraForImageClassificationOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states, ) @add_start_docstrings( """ Hiera backbone, to be used with frameworks like DETR and MaskFormer. 
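 Feature maps are returned in `(batch_size, num_channels, height, width)` order, ready to be consumed by such frameworks.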
""", HIERA_START_DOCSTRING, ) class HieraBackbone(HieraPreTrainedModel, BackboneMixin): def __init__(self, config: HieraConfig): super().__init__(config) super()._init_backbone(config) self.num_features = [config.embed_dim] + [ int(config.embed_dim * config.embed_dim_multiplier**i) for i in range(len(config.depths)) ] self.embeddings = HieraEmbeddings(config, is_mae=False) self.encoder = HieraEncoder(config) # Add layer norms to hidden states of out_features hidden_states_norms = {} for stage, num_channels in zip(self._out_features, self.channels): hidden_states_norms[stage] = nn.LayerNorm(num_channels) self.hidden_states_norms = nn.ModuleDict(hidden_states_norms) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings def forward( self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> BackboneOutput: """ Returns: Examples: ```python >>> from transformers import AutoImageProcessor, AutoBackbone >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("facebook/hiera-tiny-224-hf") >>> model = AutoBackbone.from_pretrained( ... "facebook/hiera-tiny-224-hf", out_features=["stage1", "stage2", "stage3", "stage4"] ... ) >>> inputs = processor(image, return_tensors="pt") >>> outputs = model(**inputs) >>> feature_maps = outputs.feature_maps >>> list(feature_maps[-1].shape) [1, 768, 7, 7] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions embedding_output, _, _ = self.embeddings(pixel_values) outputs = self.encoder( embedding_output, head_mask=None, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict, ) hidden_states = outputs[-1] feature_maps = () for stage, hidden_state in zip(self.stage_names, hidden_states): if stage in self.out_features: batch_size, height, width, num_channels = hidden_state.shape hidden_state = hidden_state.view(batch_size, height * width, num_channels) hidden_state = self.hidden_states_norms[stage](hidden_state) hidden_state = hidden_state.view(batch_size, height, width, num_channels) hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous() feature_maps += (hidden_state,) if not return_dict: output = (feature_maps,) if output_hidden_states: output += (outputs[1],) if output_attentions: output += (outputs[2],) return output return BackboneOutput( feature_maps=feature_maps, hidden_states=outputs[1] if output_hidden_states else None, attentions=outputs[2] if output_attentions else None, )
transformers/src/transformers/models/hiera/modeling_hiera.py/0
{ "file_path": "transformers/src/transformers/models/hiera/modeling_hiera.py", "repo_id": "transformers", "token_count": 29340 }
368
# coding=utf-8 # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 Idefics model.""" from __future__ import annotations from dataclasses import dataclass from typing import List, Optional, Tuple, Union import tensorflow as tf from ... import TFPreTrainedModel from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ModelOutput from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFModelInputType, keras_serializable, shape_list, unpack_inputs, ) from ...tf_utils import invert_attention_mask, scaled_dot_product_attention from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_idefics import IdeficsConfig from .perceiver_tf import TFIdeficsPerceiverResampler from .vision_tf import TFIdeficsVisionTransformer logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "IdeficsConfig" @dataclass class TFIdeficsBaseModelOutputWithPast(ModelOutput): """ Base class for Idefics model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`tuple(tuple(tf.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(tf.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. 
attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. image_hidden_states (`tuple(tf.Tensor)`, *optional*): Tuple of `tf.Tensor` (one for the output of the image embeddings, `(batch_size, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver """ last_hidden_state: tf.Tensor = None past_key_values: Optional[Tuple[Tuple[tf.Tensor]]] = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None image_hidden_states: Optional[Tuple[tf.Tensor]] = None @dataclass class TFIdeficsCausalLMOutputWithPast(ModelOutput): """ Base class for Idefics causal language model (or autoregressive) outputs. Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`tuple(tuple(tf.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(tf.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. image_hidden_states (`tuple(tf.Tensor)`, *optional*): Tuple of `tf.Tensor` (one for the output of the image embeddings, `(batch_size, num_images, sequence_length, hidden_size)`. 
image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver """ loss: Optional[tf.Tensor] = None logits: tf.Tensor = None past_key_values: Optional[List[tf.Tensor]] = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None image_hidden_states: Optional[Tuple[tf.Tensor]] = None def expand_inputs_for_generation( input_ids, expand_size=1, is_encoder_decoder=False, attention_mask=None, encoder_outputs=None, **model_kwargs, ): expanded_return_idx = tf.reshape(tf.repeat(tf.range(tf.shape(input_ids)[0]), expand_size), [-1]) input_ids = tf.gather(input_ids, expanded_return_idx) model_kwargs["pixel_values"] = model_kwargs.get("pixel_values", None) model_kwargs["image_encoder_embeddings"] = model_kwargs.get("image_encoder_embeddings", None) model_kwargs["perceiver_embeddings"] = model_kwargs.get("perceiver_embeddings", None) model_kwargs["image_attention_mask"] = model_kwargs.get("image_attention_mask", None) if "token_type_ids" in model_kwargs: token_type_ids = model_kwargs["token_type_ids"] model_kwargs["token_type_ids"] = tf.gather(token_type_ids, expanded_return_idx) if attention_mask is not None: model_kwargs["attention_mask"] = tf.gather(attention_mask, expanded_return_idx) if model_kwargs["image_attention_mask"] is not None: model_kwargs["image_attention_mask"] = tf.gather(model_kwargs["image_attention_mask"], expanded_return_idx) if model_kwargs["pixel_values"] is not None: model_kwargs["pixel_values"] = tf.gather(model_kwargs["pixel_values"], expanded_return_idx) elif model_kwargs["image_encoder_embeddings"] is not None: model_kwargs["image_encoder_embeddings"] = tf.gather( model_kwargs["image_encoder_embeddings"], expanded_return_idx ) elif model_kwargs["perceiver_embeddings"] is not None: model_kwargs["perceiver_embeddings"] = tf.gather(model_kwargs["perceiver_embeddings"], expanded_return_idx) return input_ids, model_kwargs def update_model_kwargs_for_generation(outputs, model_kwargs): # must have this key set to at least None if "past_key_values" in outputs: model_kwargs["past_key_values"] = outputs.past_key_values else: model_kwargs["past_key_values"] = None # update token_type_ids with last value if "token_type_ids" in model_kwargs: token_type_ids = model_kwargs["token_type_ids"] model_kwargs["token_type_ids"] = tf.concat([token_type_ids, token_type_ids[:, -1:, ...]], axis=-1) # update attention masks if "attention_mask" in model_kwargs: attention_mask = model_kwargs["attention_mask"] model_kwargs["attention_mask"] = tf.concat( [attention_mask, tf.ones_like(attention_mask[:, -1:, ...])], axis=-1 ) if "image_attention_mask" in model_kwargs: image_attention_mask = model_kwargs["image_attention_mask"] last_mask = image_attention_mask[:, -1:, ...] 
model_kwargs["image_attention_mask"] = last_mask # Get the precomputed image_hidden_states model_kwargs["image_hidden_states"] = outputs.image_hidden_states return model_kwargs def prepare_inputs_for_generation(input_ids, past_key_values=None, **kwargs): token_type_ids = kwargs.get("token_type_ids", None) # only last token for inputs_ids if past is defined in kwargs if past_key_values is not None: input_ids = input_ids[:, -1:] if token_type_ids is not None: token_type_ids = token_type_ids[:, -1:] attention_mask = kwargs.get("attention_mask", None) position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = tf.math.cumsum(tf.cast(attention_mask, dtype=tf.int64), axis=-1) - 1 position_ids = tf.where(attention_mask == 0, 1, position_ids) if past_key_values is not None: position_ids = position_ids[:, -1:] pixel_values = kwargs.get("pixel_values", None) image_encoder_embeddings = kwargs.get("image_encoder_embeddings", None) perceiver_embeddings = kwargs.get("perceiver_embeddings", None) image_attention_mask = kwargs.get("image_attention_mask", None) interpolate_pos_encoding = kwargs.get("interpolate_pos_encoding", False) return { "input_ids": input_ids, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "position_ids": position_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, "pixel_values": pixel_values, "image_encoder_embeddings": image_encoder_embeddings, "perceiver_embeddings": perceiver_embeddings, "image_attention_mask": image_attention_mask, "interpolate_pos_encoding": interpolate_pos_encoding, } def freeze_model(model, module_exceptions=[]): mapping = { "LayerNorm": tf.keras.layers.LayerNormalization, "Dense": tf.keras.layers.Dense, "Embedding": tf.keras.layers.Embedding, } module_exceptions_mapped = [mapping[m] for m in module_exceptions] if not hasattr(model, "layers"): model.trainable = False # It is just a layer return model for layer in model.layers: if module_exceptions and any(isinstance(layer, t) for t in module_exceptions_mapped): layer.trainable = True # Explicitly setting it to true to avoid any mistakes else: layer.trainable = False return model class TFIdeficsDecoupledEmbedding(tf.keras.layers.Embedding): """ Implements a decoupling of parameters to allow freezing (or not) a subset of the embeddings. In practise, the regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if `num_additional_embeddings` > 0, then it will create `num_additional_embeddings` additional parameters that are always trained. If `num_additional_embeddings=0`, then the module defaults back to the regular behavior of `tf.keras.layers.Embedding`. """ def __init__( self, num_embeddings, num_additional_embeddings, embedding_dim, partially_freeze: Optional[bool] = False, dtype=None, **kwargs, ) -> None: """ Args: num_embeddings (`int`): Size of the dictionary of embeddings num_additional_embeddings (`int`): Number of additional embeddings. Only useful when you `partially_freeze=True`. embedding_dim (`int`): The size of each embedding vector partially_freeze: (`bool`, *optional*, defaults to `False`): If `True`, the regular `weight` will be frozen. `additional_weight` is never frozen. Note: there are a lot of other parameters to initialize a standard `tf.keras.layers.Embedding` such as `mask_zero`, `input_length` or `embeddings_initializer`. We are not supporting these. 
""" super().__init__( input_dim=num_embeddings, output_dim=embedding_dim, dtype=dtype, **kwargs, ) self.num_embeddings = num_embeddings self.num_additional_embeddings = num_additional_embeddings self.partially_freeze = partially_freeze if partially_freeze: self.trainable = False if self.num_additional_embeddings > 0: self.additional_embedding = tf.keras.layers.Embedding( input_dim=self.num_additional_embeddings, output_dim=embedding_dim, dtype=dtype, name="additional_embedding", ) def call(self, input_ids): """ we have 2 embeddings, with different indices - one pretrained self.weight and another self.additional_embedding.weight that is being trained. in order to make a lookup of the input ids, we: 1. find out the indices of the entries belonging to the 2nd embedding 2. extract those values while subtracting the size of the first embedding (num_embeddings), since the 2nd embedding starts from 0 and not num_embeddings 3. perform the 2nd embedding lookup 4. now we handle the 1st embedding, we overwrite indices belonging to the 2nd embedding with a padding index 5. perform the 1st embedding lookup 6. now we overwrite the values in the 1st embedding lookup with the values of the 2nd embedding lookup note: for the 1st embedding lookup we could have looked up only the low indices and not do the padding, but then we have to create a new tensor and populate it with 2 tensors that are spread out across various indices - i.e. not a simple concat - I haven't benchmarked the complex case if it's any faster, given that seqlens are usually relatively short it's probably not faster or if faster not by much - but might be a good idea to measure. """ if self.num_additional_embeddings == 0: return super().call(input_ids) # Clone so that we don't modify the original input_ids later on input_ids = tf.identity(input_ids) additional_vocab_indices = tf.where(input_ids >= self.num_embeddings) input_ids_additional_vocab = tf.gather_nd(input_ids, additional_vocab_indices) additional_embeddings = self.additional_embedding(input_ids_additional_vocab - self.num_embeddings) # for successful lookup replace input_ids with 0, the results of these will be discarded anyway input_ids = tf.tensor_scatter_nd_update( input_ids, additional_vocab_indices, # tensor filled with 0, having the same length as additional_vocab_indices tf.zeros(tf.shape(additional_vocab_indices)[0], dtype=input_ids.dtype), ) full_vector = super().call(input_ids) # overwrite the records with high indices full_vector = tf.tensor_scatter_nd_update(full_vector, additional_vocab_indices, additional_embeddings) return full_vector def extra_repr(self) -> str: return "num_embeddings={}, num_additional_embeddings={}, embedding_dim={}, partially_freeze={}".format( self.num_embeddings, self.num_additional_embeddings, self.output_dim, self.partially_freeze, ) class TFIdeficsDecoupledLinear(tf.keras.layers.Layer): """ Implements a decoupling of parameters to allow freezing (or not) a subset of the parameters. In practise, the regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if `out_additional_features` > 0, then it will create `out_additional_features * in_features` additional parameters that are always trained. If `out_additional_features=0`, then the module defaults back to the regular behavior of `tf.keras.layers.Dense`. """ def __init__( self, in_features: int, out_features: int, out_additional_features: int = 0, bias: bool = True, partially_freeze: bool = True, **kwargs, ) -> None: """ out_additional_features: int. 
Number of additional trainable dimensions. Only makes sense when `partially_freeze=True`. partially_freeze: bool. If True, the regular `weight` will be frozen and extra parameters (if any) will be trainable. If False, default to the regular behavior of tf.keras.layers.Dense. """ super().__init__(**kwargs) self.out_additional_features = out_additional_features self.partially_freeze = partially_freeze self.in_features = in_features self.out_features = out_features self.use_bias = bias if out_additional_features > 0: self.additional_fc = tf.keras.layers.Dense( units=out_additional_features, use_bias=bias, name="additional_fc" ) def call(self, inputs: tf.Tensor) -> tf.Tensor: output = tf.linalg.matmul(a=inputs, b=self.weight, transpose_b=True) if self.bias is not None: output = tf.nn.bias_add(output, self.bias) if self.out_additional_features > 0: additional_features = self.additional_fc(inputs) output = tf.concat([output, additional_features], axis=-1) return output def get_config(self): config = super().get_config() config.update( { "in_features": self.in_features, "out_features": self.out_features, "out_additional_features": self.out_additional_features, "bias": self.bias is not None, "partially_freeze": self.partially_freeze, } ) return config def extra_repr(self) -> str: """Overwriting `nn.Linear.extra_repr` to include new parameters.""" return "in_features={}, out_features={}, out_additional_features={}, bias={}, partially_freeze={}".format( self.in_features, self.out_features, self.out_additional_features, self.bias is not None, self.partially_freeze, ) @classmethod def from_config(cls, config): return cls(**config) def build(self, input_shape=None): if self.built: return self.built = True self.weight = self.add_weight( shape=(self.out_features, self.in_features), trainable=not self.partially_freeze, name="weight" ) if self.use_bias: self.bias = self.add_weight(shape=(self.out_features,), trainable=not self.partially_freeze, name="bias") else: self.bias = None if getattr(self, "additional_fc", None) is not None: with tf.name_scope(self.additional_fc.name): self.additional_fc.build(self.in_features) def _make_causal_mask(input_ids_shape, dtype, past_key_values_length=0): """ Make causal mask used for bi-directional self-attention, supporting both static and dynamic shapes. """ bsz, tgt_len = input_ids_shape # Create a matrix where only the lower triangle and diagonal are filled with zeros (causal mask) mask = tf.fill((tgt_len, tgt_len), tf.dtypes.as_dtype(dtype).min) mask_cond = tf.range(tgt_len) mask = tf.where(mask_cond[:, None] >= mask_cond[None, :], 0.0, mask) if past_key_values_length > 0: mask = tf.concat([tf.zeros((tgt_len, past_key_values_length), dtype=dtype), mask], axis=-1) if bsz is None: # When batch size is dynamic, expand and tile # so we can compile a functional model mask = tf.expand_dims(mask, 0) mask = tf.expand_dims(mask, 0) # shape: (1, 1, tgt_len, tgt_len + past_key_values_length) mask = tf.tile(mask, [bsz, 1, 1, 1]) else: # When batch size is static, directly use broadcast_to mask = tf.broadcast_to(mask[None, None, :, :], (bsz, 1, tgt_len, tgt_len + past_key_values_length)) return mask def _expand_mask(mask, dtype, tgt_len=None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
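 Positions that may be attended to keep a value of `0.0`, while masked positions are filled with a large negative value (`tf.float32.min`) so they are effectively ignored by the softmax.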
""" bsz, src_len = shape_list(mask) tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = tf.expand_dims(tf.expand_dims(mask, 1), 1) expanded_mask = tf.broadcast_to(expanded_mask, [bsz, 1, tgt_len, src_len]) inverted_mask = 1.0 - tf.cast(expanded_mask, dtype) return tf.where( tf.cast(inverted_mask, bool), tf.fill(dims=shape_list(inverted_mask), value=tf.float32.min), inverted_mask ) class TFIdeficsRMSNorm(tf.keras.layers.Layer): def __init__(self, hidden_size, eps=1e-6, **kwargs): """ TFIdeficsRMSNorm is equivalent to T5LayerNorm """ super().__init__(**kwargs) self.hidden_size = hidden_size self.variance_epsilon = eps def build(self, input_shape): if self.built: return self.built = True self.weight = self.add_weight(name="weight", shape=[self.hidden_size], initializer="ones") super().build(input_shape) def call(self, hidden_states): variance = tf.math.reduce_mean(tf.math.square(tf.cast(hidden_states, tf.float32)), axis=-1, keepdims=True) hidden_states = hidden_states * tf.math.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [tf.float16, tf.bfloat16]: hidden_states = tf.cast(hidden_states, self.weight.dtype) return self.weight * hidden_states class TFIdeficsEmbedding(tf.keras.layers.Layer): def __init__(self, dim, max_position_embeddings=2048, base=10000, **kwargs): super().__init__(**kwargs) self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base self.inv_freq = tf.constant( 1.0 / (self.base ** (tf.range(start=0, limit=self.dim, delta=2, dtype=tf.float32) / self.dim)) ) def _compute_cos_sin(self, seq_len): t = tf.range(seq_len, dtype=self.inv_freq.dtype) freqs = tf.einsum("i, j -> ij", t, self.inv_freq) # Outer multiplication emb = tf.concat((freqs, freqs), axis=-1) return tf.cos(emb), tf.sin(emb) def call(self, x, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] if seq_len is None: seq_len = shape_list(x)[2] return self._compute_cos_sin(seq_len=seq_len) def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return tf.concat((-x2, x1), axis=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids): cos = tf.gather(cos, position_ids) # [seq_len, dim] -> [batch_size, 1, seq_len, head_dim] sin = tf.gather(sin, position_ids) cos = tf.expand_dims(cos, 1) sin = tf.expand_dims(sin, 1) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed class TFIdeficsMLP(tf.keras.layers.Layer): def __init__( self, hidden_size: int, intermediate_size: int, hidden_act: str, **kwargs, ): super().__init__(**kwargs) self.gate_proj = tf.keras.layers.Dense(intermediate_size, use_bias=False, name="gate_proj") self.down_proj = tf.keras.layers.Dense(hidden_size, use_bias=False, name="down_proj") self.up_proj = tf.keras.layers.Dense(intermediate_size, use_bias=False, name="up_proj") self.act_fn = get_tf_activation(hidden_act) self.intermediate_size = intermediate_size self.hidden_size = hidden_size def call(self, x): return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "gate_proj", None) is not None: with tf.name_scope(self.gate_proj.name): self.gate_proj.build(self.hidden_size) if getattr(self, "down_proj", None) is not None: with tf.name_scope(self.down_proj.name): self.down_proj.build(self.intermediate_size) if getattr(self, "up_proj", None) is not 
None:
            with tf.name_scope(self.up_proj.name):
                self.up_proj.build(self.hidden_size)


class TFIdeficsAttention(tf.keras.layers.Layer):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        dropout: float = 0.0,
        is_cross_attention: bool = False,
        config: IdeficsConfig = None,
        qk_layer_norms: bool = False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.head_dim = hidden_size // num_heads
        self.dropout = dropout
        self.config = config
        self.is_causal = True

        if (self.head_dim * num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {num_heads})."
            )

        self.is_cross_attention = is_cross_attention

        self.q_proj = tf.keras.layers.Dense(
            num_heads * self.head_dim,
            use_bias=False,
            name="q_proj",
        )
        self.k_proj = tf.keras.layers.Dense(
            num_heads * self.head_dim,
            use_bias=False,
            name="k_proj",
        )
        self.v_proj = tf.keras.layers.Dense(
            num_heads * self.head_dim,
            use_bias=False,
            name="v_proj",
        )
        self.o_proj = tf.keras.layers.Dense(
            hidden_size,
            use_bias=False,
            name="o_proj",
        )
        self.rotary_emb = TFIdeficsEmbedding(self.head_dim, name="rotary_emb")

        self.qk_layer_norms = qk_layer_norms
        if self.qk_layer_norms:
            self.q_layer_norm = TFIdeficsRMSNorm(self.head_dim, eps=config.rms_norm_eps, name="q_layer_norm")
            self.k_layer_norm = TFIdeficsRMSNorm(self.head_dim, eps=config.rms_norm_eps, name="k_layer_norm")

    def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
        return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), perm=[0, 2, 1, 3])

    def call(
        self,
        hidden_states: tf.Tensor,
        key_value_states: Optional[tf.Tensor] = None,
        attention_mask: Optional[tf.Tensor] = None,
        position_ids: Optional[tf.Tensor] = None,
        past_key_value: Optional[Tuple[tf.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Tuple[tf.Tensor, Optional[tf.Tensor], Optional[Tuple[tf.Tensor]]]:
        # if key_value_states are provided this layer is used as a cross-attention layer
        is_cross_attention = self.is_cross_attention or key_value_states is not None

        bsz, q_len, _ = shape_list(hidden_states)

        query_states = self._shape(self.q_proj(hidden_states), q_len, bsz)
        if not is_cross_attention:
            key_states = self._shape(self.k_proj(hidden_states), q_len, bsz)
            value_states = self._shape(self.v_proj(hidden_states), q_len, bsz)
        else:
            _, kv_len, _ = shape_list(key_value_states)  # Note that, in this case, `kv_len` == `kv_seq_len`
            key_states = self._shape(self.k_proj(key_value_states), kv_len, bsz)
            value_states = self._shape(self.v_proj(key_value_states), kv_len, bsz)

        kv_seq_len = shape_list(key_states)[-2]
        if past_key_value is not None:
            kv_seq_len += shape_list(past_key_value[0])[-2]
        if not is_cross_attention:
            # `tf.maximum` keeps this branch compilable with symbolic tensors
            if tf.is_tensor(kv_seq_len):
                seq_len = tf.maximum(kv_seq_len, q_len)
            else:
                seq_len = max(kv_seq_len, q_len)
            cos, sin = self.rotary_emb(value_states, seq_len)
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
        # [bsz, nh, t, hd]

        if past_key_value is not None:
            # reuse k, v, self_attention
            key_states = tf.concat([past_key_value[0], key_states], axis=2)
            value_states = tf.concat([past_key_value[1], value_states], axis=2)

        past_key_value = (key_states, value_states) if use_cache else None

        if self.qk_layer_norms:
            query_states = self.q_layer_norm(query_states)
            key_states = self.k_layer_norm(key_states)
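
        # At this point query/key/value states have shape [bsz, num_heads, seq_len, head_dim]; a provided
        # attention_mask is expected to already be expanded to [bsz, 1, q_len, kv_seq_len] (see `_expand_mask`).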
if attention_mask is not None:
            tf.debugging.assert_equal(
                tf.shape(attention_mask),
                [bsz, 1, q_len, kv_seq_len],
                message=f"Attention mask should be of size {[bsz, 1, q_len, kv_seq_len]}, but is {tf.shape(attention_mask)}",
            )

        attn_output = scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=attention_mask,
            # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
            is_causal=self.is_causal and attention_mask is None and q_len > 1,
        )

        tf.debugging.assert_equal(
            tf.shape(attn_output),
            [bsz, self.num_heads, q_len, self.head_dim],
            message=f"Attention output should be of size {[bsz, self.num_heads, q_len, self.head_dim]}, but is {tf.shape(attn_output)}",
        )

        attn_output = tf.reshape(tf.transpose(attn_output, perm=[0, 2, 1, 3]), (bsz, q_len, self.hidden_size))
        attn_output = self.o_proj(attn_output)

        attn_weights = None
        if output_attentions:
            logger.warning_once(
                "attn_weights are not extracted in scaled_dot_product_attention. The model returns None instead"
            )

        return attn_output, attn_weights, past_key_value

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if self.is_cross_attention:
            kv_input_dim = (
                self.hidden_size
                if not hasattr(self.config.vision_config, "embed_dim")
                else self.config.vision_config.embed_dim
            )
        else:
            kv_input_dim = self.hidden_size
        if getattr(self, "o_proj", None) is not None:
            with tf.name_scope(self.o_proj.name):
                self.o_proj.build(self.num_heads * self.head_dim)
        if getattr(self, "q_proj", None) is not None:
            with tf.name_scope(self.q_proj.name):
                self.q_proj.build(self.hidden_size)
        if getattr(self, "k_proj", None) is not None:
            with tf.name_scope(self.k_proj.name):
                self.k_proj.build(kv_input_dim)
        if getattr(self, "v_proj", None) is not None:
            with tf.name_scope(self.v_proj.name):
                self.v_proj.build(kv_input_dim)
        if getattr(self, "rotary_emb", None) is not None:
            with tf.name_scope(self.rotary_emb.name):
                self.rotary_emb.build(None)


class TFIdeficsDecoderLayer(tf.keras.layers.Layer):
    def __init__(self, config: IdeficsConfig, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = config.hidden_size
        self.self_attn = TFIdeficsAttention(
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
            dropout=config.dropout,
            config=config,
            name="self_attn",
        )
        self.mlp = TFIdeficsMLP(
            hidden_size=self.hidden_size,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            name="mlp",
        )
        self.input_layernorm = TFIdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps, name="input_layernorm")
        self.post_attention_layernorm = TFIdeficsRMSNorm(
            config.hidden_size, eps=config.rms_norm_eps, name="post_attention_layernorm"
        )
        self.dropout = config.dropout

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: Optional[tf.Tensor] = None,
        position_ids: Optional[tf.Tensor] = None,
        past_key_value: Optional[Tuple[tf.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        training=False,
    ) -> Tuple[tf.Tensor, Optional[Tuple[tf.Tensor, tf.Tensor]]]:
        """
        Args:
            hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`tf.Tensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_value (`Tuple(tf.Tensor)`, *optional*): cached past key and value projection states """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = tf.nn.dropout(hidden_states, rate=self.dropout) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = tf.nn.dropout(hidden_states, rate=self.dropout) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attn", None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, "mlp", None) is not None: with tf.name_scope(self.mlp.name): self.mlp.build(None) if getattr(self, "input_layernorm", None) is not None: with tf.name_scope(self.input_layernorm.name): self.input_layernorm.build(None) if getattr(self, "post_attention_layernorm", None) is not None: with tf.name_scope(self.post_attention_layernorm.name): self.post_attention_layernorm.build(None) class TFIdeficsGatedCrossAttentionLayer(tf.keras.layers.Layer): def __init__(self, config: IdeficsConfig, **kwargs): super().__init__(**kwargs) self.hidden_size = config.hidden_size self.cross_attn = TFIdeficsAttention( hidden_size=self.hidden_size, num_heads=config.num_attention_heads, is_cross_attention=True, dropout=config.dropout, config=config, qk_layer_norms=config.qk_layer_norms, name="cross_attn", ) self.mlp = TFIdeficsMLP( hidden_size=self.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, name="mlp", ) self.input_layernorm = TFIdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps, name="input_layernorm") self.post_attention_layernorm = TFIdeficsRMSNorm( config.hidden_size, eps=config.rms_norm_eps, name="post_attention_layernorm" ) self.config = config.dropout self.act_cross_attn = tf.keras.activations.tanh self.act_dense = tf.keras.activations.tanh self.alpha_initializer = config.alpha_initializer self.alpha_type = config.alpha_type self.alphas_initializer_range = config.alphas_initializer_range def build(self, input_shape): if self.built: return self.built = True if self.alpha_initializer == "zeros": if self.alpha_type == "vector": self.alpha_cross_attn = self.add_weight( shape=(1, 1, self.hidden_size), initializer="zeros", trainable=True, name="alpha_cross_attn" ) self.alpha_dense = self.add_weight( shape=(1, 1, self.hidden_size), initializer="zeros", trainable=True, name="alpha_dense" ) elif self.alpha_type == "float": self.alpha_cross_attn = self.add_weight( shape=(1,), initializer="zeros", trainable=True, name="alpha_cross_attn" ) self.alpha_dense = self.add_weight(shape=(1,), initializer="zeros", trainable=True, name="alpha_dense") else: raise ValueError(f"Unknown value for `alpha_type` ({self.alpha_type})") elif self.alpha_initializer == "ones": if self.alpha_type == "vector": 
self.alpha_cross_attn = self.add_weight(
                    shape=(1, 1, self.hidden_size), initializer="ones", trainable=True, name="alpha_cross_attn"
                )
                self.alpha_dense = self.add_weight(
                    shape=(1, 1, self.hidden_size), initializer="ones", trainable=True, name="alpha_dense"
                )
            elif self.alpha_type == "float":
                self.alpha_cross_attn = self.add_weight(
                    shape=(1,), initializer="ones", trainable=True, name="alpha_cross_attn"
                )
                self.alpha_dense = self.add_weight(shape=(1,), initializer="ones", trainable=True, name="alpha_dense")
            else:
                raise ValueError(f"Unknown value for `alpha_type` ({self.alpha_type})")
        elif self.alpha_initializer in {"normal", "gaussian", "random"}:
            if self.alpha_type == "vector":
                self.alpha_cross_attn = self.add_weight(
                    shape=(1, 1, self.hidden_size),
                    initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=self.alphas_initializer_range),
                    trainable=True,
                    name="alpha_cross_attn",
                )
                self.alpha_dense = self.add_weight(
                    shape=(1, 1, self.hidden_size),
                    initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=self.alphas_initializer_range),
                    trainable=True,
                    name="alpha_dense",
                )
            elif self.alpha_type == "float":
                self.alpha_cross_attn = self.add_weight(
                    shape=(1,),
                    initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=self.alphas_initializer_range),
                    trainable=True,
                    name="alpha_cross_attn",
                )
                self.alpha_dense = self.add_weight(
                    shape=(1,),
                    initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=self.alphas_initializer_range),
                    trainable=True,
                    name="alpha_dense",
                )
            else:
                raise ValueError(f"Unknown value for `alpha_type` ({self.alpha_type})")
        else:
            raise NotImplementedError(f"Alpha initialization scheme {self.alpha_initializer} not yet implemented!")

        if not (hasattr(self, "alpha_cross_attn") and hasattr(self, "alpha_dense")):
            raise ValueError("Alpha parameters not initialized correctly!")
        with tf.name_scope(self.cross_attn.name):
            self.cross_attn.build(None)
        with tf.name_scope(self.mlp.name):
            self.mlp.build(None)
        with tf.name_scope(self.input_layernorm.name):
            self.input_layernorm.build(None)
        with tf.name_scope(self.post_attention_layernorm.name):
            self.post_attention_layernorm.build(None)
        super().build(input_shape)

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: Optional[tf.Tensor] = None,
        image_hidden_states: Optional[tf.Tensor] = None,
        image_attention_mask: Optional[tf.Tensor] = None,
        cross_attention_gate: Optional[tf.Tensor] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        past_key_value: Optional[Tuple[tf.Tensor]] = None,
    ) -> Tuple[tf.Tensor, Optional[Tuple[tf.Tensor, tf.Tensor]]]:
        """
        Args:
            hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`tf.Tensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            image_hidden_states (`tf.Tensor`, *optional*): visual features this layer cross-attends to
            image_attention_mask (`tf.Tensor`, *optional*): attention mask over the image tokens
            cross_attention_gate (`tf.Tensor`, *optional*):
                gate of size `(batch, seq_len)` that is zero for tokens attending to no images; the cross-attention
                output of those tokens is zeroed out so the model can run in pure language mode.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(tf.Tensor)`, *optional*): cached past key and value projection states
        """
        if image_hidden_states is None:
            raise ValueError(
                "`image_hidden_states` is required for the Idefics cross attention module, as these are the visual"
                " features to be conditioned on."
) if cross_attention_gate is None: raise ValueError( "`cross_attention_gate` is required for Idefics cross attention module to zero-out the cross-attention hidden_states attending to no images." ) if past_key_value is not None: raise NotImplementedError("Past key value states are not implemented for Idefics cross attention module.") residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.cross_attn( hidden_states=hidden_states, key_value_states=image_hidden_states, attention_mask=image_attention_mask, output_attentions=output_attentions, ) hidden_states = tf.nn.dropout(hidden_states, rate=self.config) mask = tf.cast(cross_attention_gate == 0, dtype=hidden_states.dtype) # Expand dimensions of mask to match hidden_states mask = tf.expand_dims(mask, -1) hidden_states = tf.where( tf.broadcast_to(mask, tf.shape(hidden_states)) == 1, tf.zeros_like(hidden_states), hidden_states ) # when there are no images the model is used in pure language mode # gate = 0 if no_images else 1 hidden_states = residual + self.act_cross_attn(self.alpha_cross_attn) * hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = tf.nn.dropout(hidden_states, rate=self.config) hidden_states = residual + self.act_dense(self.alpha_dense) * hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs LLAMA_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a TensorFlow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) subclass. Use it as a regular TensorFlow Layer and refer to the TensorFlow documentation for all matter related to general usage and behavior. Parameters: config ([`IdeficsConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ @add_start_docstrings( "The bare LLaMA Model outputting raw hidden-states without any specific head on top.", LLAMA_START_DOCSTRING, ) class TFIdeficsPreTrainedModel(TFPreTrainedModel): config_class = IdeficsConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["TFIdeficsDecoderLayer", "TFIdeficsGatedCrossAttentionLayer"] LLAMA_INPUTS_DOCSTRING = r""" Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) past_key_values (`tuple(tuple(tf.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(tf.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare LLaMA Model outputting raw hidden-states without any specific head on top.", LLAMA_START_DOCSTRING, ) @keras_serializable class TFIdeficsMainLayer(tf.keras.layers.Layer): """ Transformer decoder consisting of `config.num_hidden_layers` layers. 
    Each layer is a [`IdeficsDecoderLayer`]

    Args:
        config: IdeficsConfig
    """

    config_class = IdeficsConfig

    def __init__(self, config: IdeficsConfig, add_pooling_layer: bool = True, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = TFIdeficsDecoupledEmbedding(
            num_embeddings=config.vocab_size,
            num_additional_embeddings=config.additional_vocab_size,
            embedding_dim=config.hidden_size,
            partially_freeze=config.freeze_text_layers,
            name="embed_tokens",
        )

        self.image_size = config.vision_config.image_size
        self.vision_config = config.vision_config
        self.vision_model = TFIdeficsVisionTransformer(config.vision_config, name="vision_model")

        # Perceiver Resampler
        if config.use_resampler:
            perceiver_config = config.perceiver_config
            self.perceiver_resampler = TFIdeficsPerceiverResampler(
                config,
                config.vision_config.embed_dim,
                perceiver_config.resampler_depth,
                perceiver_config.resampler_n_heads,
                perceiver_config.resampler_head_dim,
                perceiver_config.resampler_n_latents,
                name="perceiver_resampler",
            )

        self.decoder_layers = [
            TFIdeficsDecoderLayer(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)
        ]

        self.cross_layer_interval = config.cross_layer_interval
        num_cross_layers = config.num_hidden_layers // self.cross_layer_interval
        self.gated_cross_attn_layers = [
            TFIdeficsGatedCrossAttentionLayer(config, name=f"gated_cross_attn_layers.{i}")
            for i in range(num_cross_layers)
        ]
        self.norm = TFIdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps, name="norm")

        self.gradient_checkpointing = False
        self.freeze_relevant_params(config)

    def freeze_relevant_params(self, config=None):
        if config is None:
            config = self.config

        if config.freeze_text_layers:
            self.freeze_text_layers(config.freeze_text_module_exceptions)

        if config.freeze_vision_layers:
            freeze_model(self.vision_model, module_exceptions=config.freeze_vision_module_exceptions)

    def freeze_text_layers(self, module_exceptions=[]):
        for module in [self.decoder_layers, self.norm]:
            freeze_model(module, module_exceptions=module_exceptions)

    def freeze_vision_layers(self, module_exceptions=[]):
        freeze_model(self.vision_model, module_exceptions=module_exceptions)

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
        # if input_shape[-1] > 1:
        combined_attention_mask = _make_causal_mask(
            input_shape,
            inputs_embeds.dtype,
            past_key_values_length=past_key_values_length,
        )

        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
            combined_attention_mask = (
                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
            )

        return combined_attention_mask

    @unpack_inputs
    @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: Optional[tf.Tensor] = None,
        position_ids: Optional[tf.Tensor] = None,
        past_key_values: Optional[List[tf.Tensor]] = None,
        inputs_embeds: Optional[tf.Tensor] = None,
        pixel_values: Optional[tf.Tensor] = None,
        image_encoder_embeddings: Optional[tf.Tensor] = None,
        perceiver_embeddings:
Optional[tf.Tensor] = None,
        image_attention_mask: Optional[tf.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: Optional[bool] = False,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = None,
    ) -> Union[TFIdeficsBaseModelOutputWithPast, Tuple[tf.Tensor]]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = shape_list(input_ids)
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = shape_list(inputs_embeds)
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        seq_length_with_past = seq_length
        past_key_values_length = 0

        if past_key_values is not None:
            past_key_values_length = shape_list(past_key_values[0][0])[2]
            seq_length_with_past = seq_length_with_past + past_key_values_length

        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = tf.math.cumsum(tf.cast(attention_mask, dtype=tf.int32), axis=-1) - 1
            position_ids = tf.where(attention_mask == 0, 1, position_ids)
        elif position_ids is None:
            position_ids = tf.range(past_key_values_length, seq_length + past_key_values_length, dtype=tf.int32)
            position_ids = tf.expand_dims(position_ids, 0)

        no_images = False
        if (
            sum((int(pixel_values is None), int(image_encoder_embeddings is None), int(perceiver_embeddings is None)))
            != 2
        ):
            raise ValueError(
                "Exactly 1 of pixel_values, image_encoder_embeddings or perceiver_embeddings has to be not-None."
) elif pixel_values is not None: no_images = tf.reduce_sum(tf.cast(pixel_values, dtype=tf.int32)) == 0 pixel_values = tf.cast(pixel_values, dtype=self.dtype) # fp16 compatibility # Below hack is because when cross-loading pytorch weights, there is an # initial forward pass with dummy input and code below is here to handle that if len(pixel_values.shape) == 4: batch_size = shape_list(pixel_values)[0] num_images = shape_list(pixel_values)[0] # pixel_values = tf.reshape(pixel_values, [batch_size * num_images, *pixel_values.shape[1:]]) elif len(pixel_values.shape) == 5: batch_size, num_images = shape_list(pixel_values)[:2] pixel_values = tf.reshape(pixel_values, [batch_size * num_images, *pixel_values.shape[2:]]) # Get sequence from the vision encoder image_hidden_states = self.vision_model( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding ).last_hidden_state elif image_encoder_embeddings is not None: batch_size, num_images, image_seq_len, image_hidden_size = shape_list(image_encoder_embeddings) image_hidden_states = tf.cast(image_encoder_embeddings, dtype=self.dtype) image_hidden_states = tf.reshape( image_hidden_states, (batch_size * num_images, image_seq_len, image_hidden_size) ) if self.config.use_resampler: if perceiver_embeddings is None: perceiver_embeddings = self.perceiver_resampler(image_hidden_states) image_seq_len, image_hidden_size = shape_list(perceiver_embeddings)[1:3] else: batch_size, num_images, image_seq_len, image_hidden_size = shape_list(perceiver_embeddings) image_hidden_states = perceiver_embeddings elif perceiver_embeddings is None: image_seq_len, image_hidden_size = shape_list(image_hidden_states)[1:3] else: raise ValueError("If `perceiver_embeddings` are passed, use_resampler should be True") image_hidden_states = tf.reshape( image_hidden_states, (batch_size, num_images * image_seq_len, image_hidden_size) ) # # Hack to use the model in full language modeling mode # image_attention_mask = tf.zeros((batch_size, seq_length, 1), dtype=tf.int32) # this is to account for the dummy inputs if pixel_values is not None and len(pixel_values.shape) == 4 and image_attention_mask is None: image_attention_mask = tf.zeros((batch_size, seq_length, 1), dtype=tf.int32) text_seq_len = shape_list(image_attention_mask)[1] image_attention_mask = tf.expand_dims(image_attention_mask, -1) image_attention_mask = tf.repeat(image_attention_mask, repeats=image_seq_len) image_attention_mask = tf.reshape(image_attention_mask, (batch_size, text_seq_len, num_images * image_seq_len)) if image_hidden_states is not None: image_batch_size, image_sequence_length, _ = shape_list(image_hidden_states) image_hidden_shape = (image_batch_size, image_sequence_length) if image_attention_mask is None: image_attention_mask = tf.ones(image_hidden_shape, dtype=tf.int32) image_attention_mask = invert_attention_mask(image_attention_mask) else: image_attention_mask = None cross_attention_gate = tf.squeeze( tf.cast(tf.reduce_any(image_attention_mask == 0, axis=-1), dtype=self.dtype), axis=1 ) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) # embed positions if attention_mask is None: attention_mask = tf.ones((batch_size, seq_length_with_past), dtype=tf.bool) attention_mask = self._prepare_decoder_attention_mask( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length ) hidden_states = inputs_embeds if self.gradient_checkpointing and training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`..."
                )
                use_cache = False

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = () if use_cache else None

        for idx, decoder_layer in enumerate(self.decoder_layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            def vblock(
                main_block,
                hidden_states,
                attention_mask,
                position_ids,
                past_key_value,
                image_hidden_states,
                image_attention_mask,
                cross_attention_gate,
                output_attentions,
                use_cache,
                layer_idx,
                cross_layer_interval,
                gated_cross_attn_layers,
            ):
                # TODO(ls): Add cross attention values to respective lists
                if layer_idx % cross_layer_interval == 0:
                    xblock = gated_cross_attn_layers[layer_idx // cross_layer_interval]
                    outputs = xblock(
                        hidden_states,
                        attention_mask=attention_mask,
                        image_hidden_states=image_hidden_states,
                        image_attention_mask=image_attention_mask,
                        cross_attention_gate=cross_attention_gate,
                        output_attentions=output_attentions,
                        use_cache=use_cache,
                        past_key_value=None,  # not implemented
                    )
                    hidden_states = outputs[0]

                layer_outputs = main_block(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )

                return layer_outputs

            if self.gradient_checkpointing and training:
                past_key_value = None
                if use_cache:
                    logger.warning_once(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                    )
                    use_cache = False

                # Arguments are passed positionally here and must line up with vblock's signature.
                layer_outputs = tf.recompute_grad(
                    vblock,
                    decoder_layer,
                    hidden_states,
                    attention_mask,
                    position_ids,
                    past_key_value,
                    image_hidden_states,
                    image_attention_mask,
                    cross_attention_gate,
                    output_attentions,
                    use_cache,
                    idx,
                    self.cross_layer_interval,
                    self.gated_cross_attn_layers,
                )
            else:
                layer_outputs = vblock(
                    decoder_layer,
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    image_hidden_states=image_hidden_states,
                    image_attention_mask=image_attention_mask,
                    cross_attention_gate=cross_attention_gate,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    layer_idx=idx,
                    cross_layer_interval=self.cross_layer_interval,
                    gated_cross_attn_layers=self.gated_cross_attn_layers,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None

        image_hidden_states = tf.reshape(
            image_hidden_states, (batch_size, num_images, image_seq_len, image_hidden_size)
        )
        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, image_hidden_states]
                if v is not None
            )
        return TFIdeficsBaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            image_hidden_states=image_hidden_states,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "embed_tokens", None) is not None:
            with tf.name_scope(self.embed_tokens.name):
                self.embed_tokens.build(None)
        if getattr(self, "vision_model", None) is not None:
            with tf.name_scope(self.vision_model.name):
                self.vision_model.build(None)
        if getattr(self, "norm", None) is not None:
            with
tf.name_scope(self.norm.name): self.norm.build(None) if getattr(self, "perceiver_resampler", None) is not None: with tf.name_scope(self.perceiver_resampler.name): self.perceiver_resampler.build(None) if getattr(self, "decoder_layers", None) is not None: for layer in self.decoder_layers: with tf.name_scope(layer.name): layer.build(None) if getattr(self, "gated_cross_attn_layers", None) is not None: for layer in self.gated_cross_attn_layers: with tf.name_scope(layer.name): layer.build(None) class TFIdeficsModel(TFIdeficsPreTrainedModel): def __init__(self, config: IdeficsConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.model = TFIdeficsMainLayer(config, name="model") def call( self, input_ids: TFModelInputType | None = None, attention_mask: Optional[tf.Tensor] = None, position_ids: Optional[tf.Tensor] = None, past_key_values: Optional[List[tf.Tensor]] = None, inputs_embeds: Optional[tf.Tensor] = None, pixel_values: Optional[tf.Tensor] = None, image_encoder_embeddings: Optional[tf.Tensor] = None, perceiver_embeddings: Optional[tf.Tensor] = None, image_attention_mask: Optional[tf.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = False, return_dict: Optional[bool] = None, training: Optional[bool] = None, ) -> Union[TFIdeficsBaseModelOutputWithPast, Tuple[tf.Tensor]]: outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, pixel_values=pixel_values, image_encoder_embeddings=image_encoder_embeddings, perceiver_embeddings=perceiver_embeddings, image_attention_mask=image_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "model", None) is not None: with tf.name_scope(self.model.name): self.model.build(None) class TFIdeficsForVisionText2Text(TFPreTrainedModel, TFCausalLanguageModelingLoss): _keys_to_ignore_on_load_missing = [r"lm_head.weight"] _tied_weights_keys = ["model.embed_tokens.weight", "lm_head.weight"] config_class = IdeficsConfig def __init__(self, config, vision_model=None, **kwargs): super().__init__(config, **kwargs) self.model = TFIdeficsMainLayer(config, name="model") self.lm_head = TFIdeficsDecoupledLinear( config.hidden_size, config.vocab_size, config.additional_vocab_size, bias=False, partially_freeze=config.freeze_lm_head, name="lm_head", ) def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model def tie_weights(self): """ Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of IdeficsDecoupledLinear and IdeficsDecoupledEmbedding. 
""" output_embeddings = self.get_output_embeddings() input_embeddings = self.get_input_embeddings() if getattr(self.config, "tie_word_embeddings", True): output_embeddings.weight = input_embeddings.weight if input_embeddings.num_additional_embeddings > 0: assert output_embeddings.out_additional_features == input_embeddings.num_additional_embeddings output_embeddings.additional_fc.weight = input_embeddings.additional_embedding.weight if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"): output_embeddings.out_features = input_embeddings.num_embeddings if hasattr(output_embeddings, "out_additional_features") and hasattr( input_embeddings, "num_additional_embeddings" ): output_embeddings.out_additional_features = input_embeddings.num_additional_embeddings @unpack_inputs @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFIdeficsCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: Optional[tf.Tensor] = None, position_ids: Optional[tf.Tensor] = None, past_key_values: Optional[List[tf.Tensor]] = None, inputs_embeds: Optional[tf.Tensor] = None, pixel_values: Optional[tf.Tensor] = None, image_encoder_embeddings: Optional[tf.Tensor] = None, perceiver_embeddings: Optional[tf.Tensor] = None, image_attention_mask: Optional[tf.Tensor] = None, labels: Optional[tf.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = False, return_dict: Optional[bool] = None, training=False, ) -> Union[TFIdeficsCausalLMOutputWithPast, Tuple[tf.Tensor]]: r""" Args: labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >> from transformers import AutoTokenizer, TFIdeficsForVisionText2Text >> model = TFIdeficsForVisionText2Text.from_pretrained("HuggingFaceM4/idefics-9b") >> tokenizer = AutoTokenizer.from_pretrained("HuggingFaceM4/idefics-9b") >> prompt = "Hey, are you consciours? Can you talk to me?" >> inputs = tokenizer(prompt, return_tensors="tf") >> # Generate >> generate_ids = model.generate(inputs.input_ids, max_length=30) >> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you." 
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, pixel_values=pixel_values, image_encoder_embeddings=image_encoder_embeddings, perceiver_embeddings=perceiver_embeddings, image_attention_mask=image_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, training=training, ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: # Shift so that tokens < n predict n if attention_mask is not None: shift_attention_mask = attention_mask[..., 1:] shift_logits = logits[..., :-1, :][shift_attention_mask != 0] shift_labels = labels[..., 1:][shift_attention_mask != 0] else: shift_logits = logits[..., :-1, :] shift_labels = labels[..., 1:] # Flatten the tokens loss = self.hf_compute_loss( labels=tf.reshape(shift_labels, [-1]), logits=tf.reshape(shift_logits, [-1, shift_logits.shape[-1]]) ) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return TFIdeficsCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states, ) def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs): image_hidden_states = kwargs.pop("image_hidden_states", None) if image_hidden_states is not None: if self.config.use_resampler: kwargs["perceiver_embeddings"] = image_hidden_states else: kwargs["image_encoder_embeddings"] = image_hidden_states kwargs["pixel_values"] = None inputs = prepare_inputs_for_generation(input_ids, past=past, **kwargs) unwanted_kwargs = ["token_type_ids"] for kwarg in unwanted_kwargs: inputs.pop(kwarg, None) return inputs @staticmethod def _expand_inputs_for_generation( *args, **model_kwargs, ): return expand_inputs_for_generation(*args, **model_kwargs) @staticmethod def _update_model_kwargs_for_generation(outputs, model_kwargs, is_encoder_decoder): return update_model_kwargs_for_generation(outputs, model_kwargs) @staticmethod def _reorder_cache(past, beam_idx): reordered_past = () for layer_past in past: reordered_past += (tuple(tf.gather(past_state, beam_idx) for past_state in layer_past),) return reordered_past def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "model", None) is not None: with tf.name_scope(self.model.name): self.model.build(None) if getattr(self, "lm_head", None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build(None)
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for ImageGPT."""

from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    infer_channel_dimension_format,
    is_scaled_image,
    make_list_of_images,
    to_numpy_array,
    valid_images,
    validate_preprocess_arguments,
)
from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)


class ImageGPTImageProcessor(BaseImageProcessor):
    r"""
    Constructs an ImageGPT image processor. This image processor can be used to resize images to a smaller resolution
    (such as 32x32 or 64x64), normalize them and finally color quantize them to obtain sequences of "pixel values"
    (color clusters).

    Args:
        clusters (`np.ndarray` or `List[List[int]]`, *optional*):
            The color clusters to use, of shape `(n_clusters, 3)` when color quantizing. Can be overridden by
            `clusters` in `preprocess`.
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's dimensions to `(size["height"], size["width"])`. Can be overridden by
            `do_resize` in `preprocess`.
        size (`Dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
            Size of the image after resizing. Can be overridden by `size` in `preprocess`.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
            Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image pixel value to between [-1, 1]. Can be overridden by `do_normalize` in
            `preprocess`.
        do_color_quantize (`bool`, *optional*, defaults to `True`):
            Whether to color quantize the image. Can be overridden by `do_color_quantize` in `preprocess`.
""" model_input_names = ["pixel_values"] def __init__( self, # clusters is a first argument to maintain backwards compatibility with the old ImageGPTImageProcessor clusters: Optional[Union[List[List[int]], np.ndarray]] = None, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_normalize: bool = True, do_color_quantize: bool = True, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 256, "width": 256} size = get_size_dict(size) self.clusters = np.array(clusters) if clusters is not None else None self.do_resize = do_resize self.size = size self.resample = resample self.do_normalize = do_normalize self.do_color_quantize = do_color_quantize # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized image. """ size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}") output_size = (size["height"], size["width"]) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def normalize( self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Normalizes an images' pixel values to between [-1, 1]. Args: image (`np.ndarray`): Image to normalize. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" image = rescale(image=image, scale=1 / 127.5, data_format=data_format, input_data_format=input_data_format) image = image - 1 return image @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_normalize: bool = None, do_color_quantize: Optional[bool] = None, clusters: Optional[Union[List[List[int]], np.ndarray]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_normalize=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only has an effect if `do_resize` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image do_color_quantize (`bool`, *optional*, defaults to `self.do_color_quantize`): Whether to color quantize the image. clusters (`np.ndarray` or `List[List[int]]`, *optional*, defaults to `self.clusters`): Clusters used to quantize the image of shape `(n_clusters, 3)`. Only has an effect if `do_color_quantize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. Only has an effect if `do_color_quantize` is set to `False`. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
""" do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size) resample = resample if resample is not None else self.resample do_normalize = do_normalize if do_normalize is not None else self.do_normalize do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize clusters = clusters if clusters is not None else self.clusters clusters = np.array(clusters) images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) # Here, normalize() is using a constant factor to divide pixel values. # hence, the method does not need iamge_mean and image_std. validate_preprocess_arguments( do_resize=do_resize, size=size, resample=resample, ) if do_color_quantize and clusters is None: raise ValueError("Clusters must be specified if do_color_quantize is True.") # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_normalize: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If you wish to do this, " "make sure to set `do_normalize` to `False` and that pixel values are between [-1, 1].", ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] if do_normalize: images = [self.normalize(image=image, input_data_format=input_data_format) for image in images] if do_color_quantize: images = [to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) images = np.array(images) images = color_quantize(images, clusters).reshape(images.shape[:-1]) # flatten to (batch_size, height*width) batch_size = images.shape[0] images = images.reshape(batch_size, -1) # We need to convert back to a list of images to keep consistent behaviour across processors. images = list(images) else: images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"input_ids": images} return BatchFeature(data=data, tensor_type=return_tensors)
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for LayoutLMv3. """ import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class LayoutLMv3Processor(ProcessorMixin): r""" Constructs a LayoutLMv3 processor which combines a LayoutLMv3 image processor and a LayoutLMv3 tokenizer into a single processor. [`LayoutLMv3Processor`] offers all the functionalities you need to prepare data for the model. It first uses [`LayoutLMv3ImageProcessor`] to resize and normalize document images, and optionally applies OCR to get words and normalized bounding boxes. These are then provided to [`LayoutLMv3Tokenizer`] or [`LayoutLMv3TokenizerFast`], which turns the words and bounding boxes into token-level `input_ids`, `attention_mask`, `token_type_ids`, `bbox`. Optionally, one can provide integer `word_labels`, which are turned into token-level `labels` for token classification tasks (such as FUNSD, CORD). Args: image_processor (`LayoutLMv3ImageProcessor`, *optional*): An instance of [`LayoutLMv3ImageProcessor`]. The image processor is a required input. tokenizer (`LayoutLMv3Tokenizer` or `LayoutLMv3TokenizerFast`, *optional*): An instance of [`LayoutLMv3Tokenizer`] or [`LayoutLMv3TokenizerFast`]. The tokenizer is a required input. 
""" attributes = ["image_processor", "tokenizer"] image_processor_class = "LayoutLMv3ImageProcessor" tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast") def __init__(self, image_processor=None, tokenizer=None, **kwargs): feature_extractor = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead.", FutureWarning, ) feature_extractor = kwargs.pop("feature_extractor") image_processor = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(image_processor, tokenizer) def __call__( self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding: """ This method first forwards the `images` argument to [`~LayoutLMv3ImageProcessor.__call__`]. In case [`LayoutLMv3ImageProcessor`] was initialized with `apply_ocr` set to `True`, it passes the obtained words and bounding boxes along with the additional arguments to [`~LayoutLMv3Tokenizer.__call__`] and returns the output, together with resized and normalized `pixel_values`. In case [`LayoutLMv3ImageProcessor`] was initialized with `apply_ocr` set to `False`, it passes the words (`text`/``text_pair`) and `boxes` specified by the user along with the additional arguments to [`~LayoutLMv3Tokenizer.__call__`] and returns the output, together with resized and normalized `pixel_values`. Please refer to the docstring of the above two methods for more information. """ # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." 
) # first, apply the image processor features = self.image_processor(images=images, return_tensors=return_tensors) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(text, str): text = [text] # add batch dimension (as the image processor always adds a batch dimension) text_pair = features["words"] encoded_inputs = self.tokenizer( text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, ) # add pixel values images = features.pop("pixel_values") if return_overflowing_tokens is True: images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"]) encoded_inputs["pixel_values"] = images return encoded_inputs def get_overflowing_images(self, images, overflow_to_sample_mapping): # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image images_with_overflow = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx]) if len(images_with_overflow) != len(overflow_to_sample_mapping): raise ValueError( "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got" f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}" ) return images_with_overflow def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def feature_extractor_class(self): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, ) return self.image_processor_class @property def feature_extractor(self): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, ) return self.image_processor
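

# --- Illustrative appendix (editor's sketch, not part of the upstream module) ---
# A minimal demonstration of the bookkeeping behind `get_overflowing_images`:
# when a long document overflows into several `input_ids` windows, every window
# must be paired with the image it originated from, so images are duplicated
# according to `overflow_to_sample_mapping`. Pure-Python toy data; no OCR,
# tokenizer, or model weights involved.
if __name__ == "__main__":
    demo_images = ["pixels_of_doc_0", "pixels_of_doc_1"]
    demo_mapping = [0, 0, 1]  # doc 0 overflowed into two windows, doc 1 into one

    demo_expanded = [demo_images[sample_idx] for sample_idx in demo_mapping]
    print(demo_expanded)  # -> ['pixels_of_doc_0', 'pixels_of_doc_0', 'pixels_of_doc_1']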
# coding=utf-8 # Copyright 2023 the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Llava model.""" from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ... import PreTrainedModel from ...activations import ACT2FN from ...modeling_outputs import ModelOutput from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ..auto import AutoModel, AutoModelForCausalLM from .configuration_llava import LlavaConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "LlavaConfig" # Base docstring _CHECKPOINT_FOR_DOC = "llava-hf/llava-1.5-7b-hf" @dataclass # Copied from transformers.models.idefics.modeling_idefics.IdeficsCausalLMOutputWithPast with Idefics->Llava class LlavaCausalLMOutputWithPast(ModelOutput): """ Base class for Llava causal language model (or autoregressive) outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. image_hidden_states (`tuple(torch.FloatTensor)`, *optional*): Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images, sequence_length, hidden_size)`. 
image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None class LlavaMultiModalProjector(nn.Module): def __init__(self, config: LlavaConfig): super().__init__() self.linear_1 = nn.Linear(config.vision_config.hidden_size, config.text_config.hidden_size, bias=True) self.act = ACT2FN[config.projector_hidden_act] self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=True) def forward(self, image_features): hidden_states = self.linear_1(image_features) hidden_states = self.act(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states LLAVA_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`LlavaConfig`] or [`LlavaVisionConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ @add_start_docstrings( "The bare LLaMA Model outputting raw hidden-states without any specific head on top.", LLAVA_START_DOCSTRING, ) class LlavaPreTrainedModel(PreTrainedModel): config_class = LlavaConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["LlavaVisionAttention"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn_2 = True _supports_cache_class = True def _init_weights(self, module): # important: this ported version of Llava isn't meant for training from scratch - only # inference and fine-tuning - so the proper init weights code has been removed - the original codebase # https://github.com/haotian-liu/LLaVA/tree/main/llava should serve for that purpose std = ( self.config.initializer_range if hasattr(self.config, "initializer_range") else self.config.text_config.initializer_range ) if hasattr(module, "class_embedding"): module.class_embedding.data.normal_(mean=0.0, std=std) if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() @property def _supports_sdpa(self): """ Retrieve language_model's attribute to check whether the model supports SDPA or not. """ return self.language_model._supports_sdpa LLAVA_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. 
            See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The tensors corresponding to the input images. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details ([`LlavaProcessor`] uses
            [`CLIPImageProcessor`] for processing images).
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
            cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        vision_feature_layer (`int`, *optional*, defaults to -2):
            The index of the layer to select the vision feature.
        vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
            The feature selection strategy used to select the vision feature from the vision backbone. Can be one of
            `"default"` or `"full"`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`, this tensor is not affected by padding. It is used to update the cache in the correct position and to infer the complete sequence length. """ @add_start_docstrings( """The LLAVA model which consists of a vision backbone and a language model.""", LLAVA_START_DOCSTRING, ) class LlavaForConditionalGeneration(LlavaPreTrainedModel): def __init__(self, config: LlavaConfig): super().__init__(config) self.vision_tower = AutoModel.from_config(config.vision_config) self.multi_modal_projector = LlavaMultiModalProjector(config) self.vocab_size = config.text_config.vocab_size self.language_model = AutoModelForCausalLM.from_config( config.text_config, attn_implementation=config._attn_implementation ) self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1 self.post_init() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def get_output_embeddings(self): return self.language_model.get_output_embeddings() def set_output_embeddings(self, new_embeddings): self.language_model.set_output_embeddings(new_embeddings) def set_decoder(self, decoder): self.language_model.set_decoder(decoder) def get_decoder(self): return self.language_model.get_decoder() def tie_weights(self): return self.language_model.tie_weights() def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None) -> nn.Embedding: model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of) # update vocab size self.config.text_config.vocab_size = model_embeds.num_embeddings self.vocab_size = model_embeds.num_embeddings return model_embeds def _merge_input_ids_with_image_features(self, image_features, inputs_embeds, input_ids, attention_mask, labels): num_images, num_image_patches, embed_dim = image_features.shape batch_size, sequence_length = input_ids.shape left_padding = not torch.sum(input_ids[:, -1] == torch.tensor(self.pad_token_id)) # 1. Create a mask to know where special image tokens are special_image_token_mask = input_ids == self.config.image_token_index num_special_image_tokens = torch.sum(special_image_token_mask, dim=-1) # Compute the maximum embed dimension max_embed_dim = (num_special_image_tokens.max() * (num_image_patches - 1)) + sequence_length batch_indices, non_image_indices = torch.where(input_ids != self.config.image_token_index) # 2. Compute the positions where text should be written # Calculate new positions for text tokens in merged image-text sequence. # `special_image_token_mask` identifies image tokens. Each image token will be replaced by `nb_text_tokens_per_images - 1` text tokens. # `torch.cumsum` computes how each image token shifts subsequent text token positions. # - 1 to adjust for zero-based indexing, as `cumsum` inherently increases indices by one. 
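        # Added worked example for clarity: with num_image_patches = 3 and input_ids = ["hey", "<image>", "how"],
        # special_image_token_mask is [0, 1, 0], so the per-token widths (mask * (num_image_patches - 1) + 1)
        # are [1, 3, 1] and torch.cumsum(...) - 1 yields [0, 3, 4]: "hey" stays at position 0, the image expands
        # into positions 1..3, and "how" lands at position 4.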
new_token_positions = torch.cumsum((special_image_token_mask * (num_image_patches - 1) + 1), -1) - 1 nb_image_pad = max_embed_dim - 1 - new_token_positions[:, -1] if left_padding: new_token_positions += nb_image_pad[:, None] # offset for left padding text_to_overwrite = new_token_positions[batch_indices, non_image_indices] # 3. Create the full embedding, already padded to the maximum position final_embedding = torch.zeros( batch_size, max_embed_dim, embed_dim, dtype=inputs_embeds.dtype, device=inputs_embeds.device ) final_attention_mask = torch.zeros( batch_size, max_embed_dim, dtype=attention_mask.dtype, device=inputs_embeds.device ) if labels is not None: final_labels = torch.full( (batch_size, max_embed_dim), self.config.ignore_index, dtype=input_ids.dtype, device=input_ids.device ) # In case the Vision model or the Language model has been offloaded to CPU, we need to manually # set the corresponding tensors into their correct target device. target_device = inputs_embeds.device batch_indices, non_image_indices, text_to_overwrite = ( batch_indices.to(target_device), non_image_indices.to(target_device), text_to_overwrite.to(target_device), ) attention_mask = attention_mask.to(target_device) # 4. Fill the embeddings based on the mask. If we have ["hey" "<image>", "how", "are"] # we need to index copy on [0, 577, 578, 579] for the text and [1:576] for the image features final_embedding[batch_indices, text_to_overwrite] = inputs_embeds[batch_indices, non_image_indices] final_attention_mask[batch_indices, text_to_overwrite] = attention_mask[batch_indices, non_image_indices] if labels is not None: final_labels[batch_indices, text_to_overwrite] = labels[batch_indices, non_image_indices] # 5. Fill the embeddings corresponding to the images. Anything that is not `text_positions` needs filling (#29835) image_to_overwrite = torch.full( (batch_size, max_embed_dim), True, dtype=torch.bool, device=inputs_embeds.device ) image_to_overwrite[batch_indices, text_to_overwrite] = False image_to_overwrite &= image_to_overwrite.cumsum(-1) - 1 >= nb_image_pad[:, None].to(target_device) if image_to_overwrite.sum() != image_features.shape[:-1].numel(): raise ValueError( f"The input provided to the model are wrong. The number of image tokens is {torch.sum(special_image_token_mask)} while" f" the number of image given to the model is {num_images}. This prevents correct indexing and breaks batch generation." ) final_embedding[image_to_overwrite] = image_features.contiguous().reshape(-1, embed_dim).to(target_device) final_attention_mask |= image_to_overwrite position_ids = (final_attention_mask.cumsum(-1) - 1).masked_fill_((final_attention_mask == 0), 1) # 6. Mask out the embedding at padding positions, as we later use the past_key_value value to determine the non-attended tokens. 
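        # Added note: with right padding, pad tokens were assigned positions above as if they were text; their
        # embeddings are zeroed here so that (for backbones whose key/value projections carry no bias, as in
        # LLaMA) the cached key/value states at those positions stay zero, which is what the cache-based masking
        # in `forward` checks for.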
batch_indices, pad_indices = torch.where(input_ids == self.pad_token_id) indices_to_mask = new_token_positions[batch_indices, pad_indices] final_embedding[batch_indices, indices_to_mask] = 0 if labels is None: final_labels = None return final_embedding, final_attention_mask, final_labels, position_ids @add_start_docstrings_to_model_forward(LLAVA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=LlavaCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, pixel_values: torch.FloatTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, vision_feature_layer: Optional[int] = None, vision_feature_select_strategy: Optional[str] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[Tuple, LlavaCausalLMOutputWithPast]: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, LlavaForConditionalGeneration >>> model = LlavaForConditionalGeneration.from_pretrained("llava-hf/llava-1.5-7b-hf") >>> processor = AutoProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf") >>> prompt = "USER: <image>\nWhat's the content of the image? ASSISTANT:" >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(text=prompt, images=image, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(**inputs, max_new_tokens=15) >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "USER: \nWhat's the content of the image? 
ASSISTANT: The image features a busy city street with a stop sign prominently displayed"
        ```"""

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        vision_feature_layer = (
            vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
        )
        vision_feature_select_strategy = (
            vision_feature_select_strategy
            if vision_feature_select_strategy is not None
            else self.config.vision_feature_select_strategy
        )

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError(
                "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
            )

        if pixel_values is not None and inputs_embeds is not None:
            raise ValueError(
                "You cannot specify both pixel_values and inputs_embeds at the same time, and must specify either one"
            )

        legacy_processing = False
        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)

            # If the number of image tokens is more than the image embeddings seq length, then we probably expanded
            # it already in processing. Not very reliable, but we don't expect anyone to actually pass 500+ images
            # for one prompt. In the decoding stage, legacy behavior is detected by the presence of pixel values
            # even if use_cache=True.
            legacy_processing = (
                (input_ids == self.config.image_token_index).sum(1).max() < self.config.image_seq_length
            ) or (input_ids.shape[-1] == 1 and pixel_values is not None)

        if pixel_values is not None:
            image_outputs = self.vision_tower(pixel_values, output_hidden_states=True)
            # this is not memory efficient at all: `output_hidden_states=True` will save all the hidden states
            selected_image_feature = image_outputs.hidden_states[vision_feature_layer]
            if vision_feature_select_strategy == "default":
                selected_image_feature = selected_image_feature[:, 1:]
            elif vision_feature_select_strategy == "full":
                selected_image_feature = selected_image_feature
            else:
                raise ValueError(f"Unexpected select feature strategy: {self.config.vision_feature_select_strategy}")

            image_features = self.multi_modal_projector(selected_image_feature)

        if legacy_processing:
            logger.warning_once(
                "Expanding inputs for image tokens in LLaVa should be done in processing. "
                "Please add `patch_size` and `vision_feature_select_strategy` to the model's processing config or set directly "
                "with `processor.patch_size = {{patch_size}}` and `processor.vision_feature_select_strategy = {{vision_feature_select_strategy}}`. "
                "Using processors without these attributes in the config is deprecated and will throw an error in v4.47."
) # prefill stage vs decoding stage (legacy behavior copied) if input_ids.shape[1] != 1: inputs_embeds, attention_mask, labels, position_ids = self._merge_input_ids_with_image_features( image_features, inputs_embeds, input_ids, attention_mask, labels ) else: # Retrieve the first layer to inspect the logits and mask out the hidden states # that are set to 0 first_layer_past_key_value = past_key_values[0][0][:, :, :, 0] # Sum all dimensions of head_dim (-2) to avoid random errors such as: https://github.com/huggingface/transformers/pull/28032#issuecomment-1863691941 batch_index, non_attended_tokens = torch.where(first_layer_past_key_value.float().sum(-2) == 0) # Get the target length target_length = input_ids.shape[1] past_length = first_layer_past_key_value.shape[-1] extended_attention_mask = torch.ones( (attention_mask.shape[0], past_length), dtype=attention_mask.dtype, device=attention_mask.device, ) # Filter out only the tokens that can be un-attended, this can happen # if one uses Llava + Fused modules where the cache on the # first iteration is already big enough, or if one passes custom cache valid_indices = non_attended_tokens < extended_attention_mask.size(-1) new_batch_index = batch_index[valid_indices] new_non_attended_tokens = non_attended_tokens[valid_indices] # Zero-out the places where we don't need to attend extended_attention_mask[new_batch_index, new_non_attended_tokens] = 0 attention_mask = torch.cat((extended_attention_mask, attention_mask[:, -target_length:]), dim=1) position_ids = torch.sum(attention_mask, dim=1).unsqueeze(-1) - 1 # TODO: @raushan retain only the new behavior after v4.47 else: special_image_mask = ( (input_ids == self.config.image_token_index).unsqueeze(-1).expand_as(inputs_embeds) ) image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features) outputs = self.language_model( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) logits = outputs[0] loss = None if labels is not None: # Shift so that tokens < n predict n if attention_mask is not None: shift_attention_mask = attention_mask[..., 1:] shift_logits = logits[..., :-1, :][shift_attention_mask.to(logits.device) != 0].contiguous() shift_labels = labels[..., 1:][shift_attention_mask.to(labels.device) != 0].contiguous() else: shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = nn.CrossEntropyLoss() loss = loss_fct( shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1).to(shift_logits.device) ) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return LlavaCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, attention_mask=None, cache_position=None, **kwargs, ): # Trigger the new behavior if we have more than image embeddings seq length tokens for images legacy_processing = ( input_ids is not None and (input_ids == self.config.image_token_index).sum(1).max() < self.config.image_seq_length ) model_inputs = 
self.language_model.prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            **kwargs,
        )

        if legacy_processing:
            model_inputs["pixel_values"] = pixel_values
        elif cache_position[0] == 0:
            # Pixel values are only needed during the prefill stage (`cache_position[0] == 0`). In the cached
            # decoding stage the input ids no longer contain any special image token, so pixel values are left
            # out and there is nothing to merge.
            model_inputs["pixel_values"] = pixel_values

        return model_inputs
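# For reference, `config.image_seq_length` (used in the legacy-processing checks above) is just the number of
# placeholder tokens one image expands to. A minimal sketch of where that number comes from for a CLIP-style
# vision tower; the helper name and the 336px/14px defaults are illustrative assumptions, not part of this file:
def expected_image_seq_length(image_size: int = 336, patch_size: int = 14, strategy: str = "default") -> int:
    # a ViT backbone yields one embedding per patch, plus a CLS embedding
    num_patches = (image_size // patch_size) ** 2  # 24 * 24 = 576 with the assumed defaults
    # the "default" feature-selection strategy drops the CLS embedding (`selected_image_feature[:, 1:]` above),
    # while "full" keeps it
    return num_patches if strategy == "default" else num_patches + 1


assert expected_image_seq_length() == 576  # the usual llava-1.5 value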
transformers/src/transformers/models/llava/modeling_llava.py/0
{ "file_path": "transformers/src/transformers/models/llava/modeling_llava.py", "repo_id": "transformers", "token_count": 12263 }
372
# coding=utf-8
# Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Longformer configuration"""

from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging


if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase


logger = logging.get_logger(__name__)


class LongformerConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`LongformerModel`] or a [`TFLongformerModel`]. It
    is used to instantiate a Longformer model according to the specified arguments, defining the model architecture.

    Instantiating a configuration with the defaults will yield a similar configuration to that of the Longformer
    [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) architecture with a sequence
    length of 4,096.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the Longformer model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`LongformerModel`] or [`TFLongformerModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`LongformerModel`] or [`TFLongformerModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. attention_window (`int` or `List[int]`, *optional*, defaults to 512): Size of an attention window around each token. If an `int`, use the same size for all layers. To specify a different window size for each layer, use a `List[int]` where `len(attention_window) == num_hidden_layers`. Example: ```python >>> from transformers import LongformerConfig, LongformerModel >>> # Initializing a Longformer configuration >>> configuration = LongformerConfig() >>> # Initializing a model from the configuration >>> model = LongformerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "longformer" def __init__( self, attention_window: Union[List[int], int] = 512, sep_token_id: int = 2, pad_token_id: int = 1, bos_token_id: int = 0, eos_token_id: int = 2, vocab_size: int = 30522, hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, max_position_embeddings: int = 512, type_vocab_size: int = 2, initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, onnx_export: bool = False, **kwargs, ): """Constructs LongformerConfig.""" super().__init__(pad_token_id=pad_token_id, **kwargs) self.attention_window = attention_window self.sep_token_id = sep_token_id self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.onnx_export = onnx_export class LongformerOnnxConfig(OnnxConfig): def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None): super().__init__(config, task, patching_specs) config.onnx_export = True @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} else: dynamic_axis = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("global_attention_mask", dynamic_axis), ] ) @property def outputs(self) -> Mapping[str, Mapping[int, str]]: outputs = super().outputs if self.task == "default": outputs["pooler_output"] = {0: "batch"} return outputs @property def atol_for_validation(self) -> float: """ What absolute tolerance value to use during model conversion validation. Returns: Float absolute tolerance value. 
""" return 1e-4 @property def default_onnx_opset(self) -> int: # needs to be >= 14 to support tril operator return max(super().default_onnx_opset, 14) def generate_dummy_inputs( self, tokenizer: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: inputs = super().generate_dummy_inputs( preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) import torch # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64) # makes the export fail randomly inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"]) # make every second token global inputs["global_attention_mask"][:, ::2] = 1 return inputs
transformers/src/transformers/models/longformer/configuration_longformer.py/0
{ "file_path": "transformers/src/transformers/models/longformer/configuration_longformer.py", "repo_id": "transformers", "token_count": 3326 }
373
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _import_structure = { "configuration_lxmert": ["LxmertConfig"], "tokenization_lxmert": ["LxmertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_lxmert"] = [ "LxmertEncoder", "LxmertForPreTraining", "LxmertForQuestionAnswering", "LxmertModel", "LxmertPreTrainedModel", "LxmertVisualFeatureEncoder", "LxmertXLayer", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tf_lxmert"] = [ "TFLxmertForPreTraining", "TFLxmertMainLayer", "TFLxmertModel", "TFLxmertPreTrainedModel", "TFLxmertVisualFeatureEncoder", ] if TYPE_CHECKING: from .configuration_lxmert import LxmertConfig from .tokenization_lxmert import LxmertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_lxmert_fast import LxmertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_lxmert import ( TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
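# Illustration (not part of the file): with the lazy structure above, importing the package costs almost
# nothing; submodules are only imported on first attribute access, and backend-guarded symbols resolve only
# when their dependency (tokenizers, torch, tf) is installed.
from transformers.models import lxmert

config_cls = lxmert.LxmertConfig  # first access imports configuration_lxmert only
model_cls = lxmert.LxmertModel  # this access imports modeling_lxmert, which needs torch available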
transformers/src/transformers/models/lxmert/__init__.py/0
{ "file_path": "transformers/src/transformers/models/lxmert/__init__.py", "repo_id": "transformers", "token_count": 1356 }
374
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for MarkupLM. """ from typing import Optional, Union from ...file_utils import TensorType from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, TruncationStrategy class MarkupLMProcessor(ProcessorMixin): r""" Constructs a MarkupLM processor which combines a MarkupLM feature extractor and a MarkupLM tokenizer into a single processor. [`MarkupLMProcessor`] offers all the functionalities you need to prepare data for the model. It first uses [`MarkupLMFeatureExtractor`] to extract nodes and corresponding xpaths from one or more HTML strings. Next, these are provided to [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`], which turns them into token-level `input_ids`, `attention_mask`, `token_type_ids`, `xpath_tags_seq` and `xpath_subs_seq`. Args: feature_extractor (`MarkupLMFeatureExtractor`): An instance of [`MarkupLMFeatureExtractor`]. The feature extractor is a required input. tokenizer (`MarkupLMTokenizer` or `MarkupLMTokenizerFast`): An instance of [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`]. The tokenizer is a required input. parse_html (`bool`, *optional*, defaults to `True`): Whether or not to use `MarkupLMFeatureExtractor` to parse HTML strings into nodes and corresponding xpaths. """ feature_extractor_class = "MarkupLMFeatureExtractor" tokenizer_class = ("MarkupLMTokenizer", "MarkupLMTokenizerFast") parse_html = True def __call__( self, html_strings=None, nodes=None, xpaths=None, node_labels=None, questions=None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding: """ This method first forwards the `html_strings` argument to [`~MarkupLMFeatureExtractor.__call__`]. Next, it passes the `nodes` and `xpaths` along with the additional arguments to [`~MarkupLMTokenizer.__call__`] and returns the output. Optionally, one can also provide a `text` argument which is passed along as first sequence. Please refer to the docstring of the above two methods for more information. 
""" # first, create nodes and xpaths if self.parse_html: if html_strings is None: raise ValueError("Make sure to pass HTML strings in case `parse_html` is set to `True`") if nodes is not None or xpaths is not None or node_labels is not None: raise ValueError( "Please don't pass nodes, xpaths nor node labels in case `parse_html` is set to `True`" ) features = self.feature_extractor(html_strings) nodes = features["nodes"] xpaths = features["xpaths"] else: if html_strings is not None: raise ValueError("You have passed HTML strings but `parse_html` is set to `False`.") if nodes is None or xpaths is None: raise ValueError("Make sure to pass nodes and xpaths in case `parse_html` is set to `False`") # # second, apply the tokenizer if questions is not None and self.parse_html: if isinstance(questions, str): questions = [questions] # add batch dimension (as the feature extractor always adds a batch dimension) encoded_inputs = self.tokenizer( text=questions if questions is not None else nodes, text_pair=nodes if questions is not None else None, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, ) return encoded_inputs def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to TrOCRTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to TrOCRTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names return tokenizer_input_names
transformers/src/transformers/models/markuplm/processing_markuplm.py/0
{ "file_path": "transformers/src/transformers/models/markuplm/processing_markuplm.py", "repo_id": "transformers", "token_count": 2521 }
375
# coding=utf-8 # Copyright 2022 Meta Platforms, Inc.s and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch MaskFormer model.""" import math from dataclasses import dataclass from numbers import Number from typing import Dict, List, Optional, Tuple import numpy as np import torch from torch import Tensor, nn from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_outputs import BaseModelOutputWithCrossAttentions from ...modeling_utils import PreTrainedModel from ...pytorch_utils import is_torch_greater_or_equal_than_2_1 from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, is_accelerate_available, is_scipy_available, logging, replace_return_docstrings, requires_backends, ) from ...utils.backbone_utils import load_backbone from ...utils.import_utils import is_torchdynamo_compiling from ..detr import DetrConfig from .configuration_maskformer import MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig if is_accelerate_available(): from accelerate import PartialState from accelerate.utils import reduce if is_scipy_available(): from scipy.optimize import linear_sum_assignment logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "MaskFormerConfig" _CHECKPOINT_FOR_DOC = "facebook/maskformer-swin-base-ade" @dataclass # Copied from transformers.models.detr.modeling_detr.DetrDecoderOutput class DetrDecoderOutput(BaseModelOutputWithCrossAttentions): """ Base class for outputs of the DETR decoder. This class adds one attribute to BaseModelOutputWithCrossAttentions, namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. This is useful when training the model with auxiliary decoding losses. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`): Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. """ intermediate_hidden_states: Optional[torch.FloatTensor] = None @dataclass class MaskFormerPixelLevelModuleOutput(ModelOutput): """ MaskFormer's pixel level module output. It returns both the last and (optionally) the hidden states from the `encoder` and `decoder`. By default, the `encoder` is a MaskFormerSwin Transformer and the `decoder` is a Feature Pyramid Network (FPN). The `encoder_last_hidden_state` are referred on the paper as **images features**, while `decoder_last_hidden_state` as **pixel embeddings** Args: encoder_last_hidden_state (`torch.FloatTensor` of shape`(batch_size, num_channels, height, width)`): Last hidden states (final feature map) of the last stage of the encoder. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the model at the output of each stage. decoder_last_hidden_state (`torch.FloatTensor` of shape`(batch_size, num_channels, height, width)`): Last hidden states (final feature map) of the last stage of the decoder. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the model at the output of each stage. """ encoder_last_hidden_state: Optional[torch.FloatTensor] = None decoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass class MaskFormerPixelDecoderOutput(ModelOutput): """ MaskFormer's pixel decoder module output, practically a Feature Pyramid Network. It returns the last hidden state and (optionally) the hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Last hidden states (final feature map) of the last stage of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights from Detr's decoder after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class MaskFormerModelOutput(ModelOutput): """ Class for outputs of [`MaskFormerModel`]. This class returns all the needed hidden states to compute the logits. Args: encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Last hidden states (final feature map) of the last stage of the encoder model (backbone). pixel_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Last hidden states (final feature map) of the last stage of the pixel decoder model (FPN). transformer_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Last hidden states (final feature map) of the last stage of the transformer decoder model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder model at the output of each stage. pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel decoder model at the output of each stage. transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the transformer decoder at the output of each stage. hidden_states `tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` containing `encoder_hidden_states`, `pixel_decoder_hidden_states` and `decoder_hidden_states` attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights from Detr's decoder after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" encoder_last_hidden_state: Optional[torch.FloatTensor] = None pixel_decoder_last_hidden_state: Optional[torch.FloatTensor] = None transformer_decoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None pixel_decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None transformer_decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class MaskFormerForInstanceSegmentationOutput(ModelOutput): """ Class for outputs of [`MaskFormerForInstanceSegmentation`]. This output can be directly passed to [`~MaskFormerImageProcessor.post_process_semantic_segmentation`] or or [`~MaskFormerImageProcessor.post_process_instance_segmentation`] or [`~MaskFormerImageProcessor.post_process_panoptic_segmentation`] depending on the task. Please, see [`~MaskFormerImageProcessor] for details regarding usage. Args: loss (`torch.Tensor`, *optional*): The computed loss, returned when labels are present. class_queries_logits (`torch.FloatTensor`): A tensor of shape `(batch_size, num_queries, num_labels + 1)` representing the proposed classes for each query. Note the `+ 1` is needed because we incorporate the null class. masks_queries_logits (`torch.FloatTensor`): A tensor of shape `(batch_size, num_queries, height, width)` representing the proposed masks for each query. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Last hidden states (final feature map) of the last stage of the encoder model (backbone). pixel_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Last hidden states (final feature map) of the last stage of the pixel decoder model (FPN). transformer_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Last hidden states (final feature map) of the last stage of the transformer decoder model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder model at the output of each stage. pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel decoder model at the output of each stage. transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the transformer decoder at the output of each stage. 
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` containing `encoder_hidden_states`, `pixel_decoder_hidden_states` and
            `decoder_hidden_states`.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights from Detr's decoder after the attention softmax, used to compute the
            weighted average in the self-attention heads.
    """

    loss: Optional[torch.FloatTensor] = None
    class_queries_logits: torch.FloatTensor = None
    masks_queries_logits: torch.FloatTensor = None
    auxiliary_logits: torch.FloatTensor = None
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    pixel_decoder_last_hidden_state: Optional[torch.FloatTensor] = None
    transformer_decoder_last_hidden_state: Optional[torch.FloatTensor] = None
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    pixel_decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    transformer_decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


def upsample_like(pixel_values: Tensor, like: Tensor, mode: str = "bilinear") -> Tensor:
    """
    A utility function that upsamples `pixel_values` to match the dimension of `like`.

    Args:
        pixel_values (`torch.Tensor`):
            The tensor we wish to upsample.
        like (`torch.Tensor`):
            The tensor we wish to use as size target.
        mode (str, *optional*, defaults to `"bilinear"`):
            The interpolation mode.

    Returns:
        `torch.Tensor`: The upsampled tensor
    """
    _, _, height, width = like.shape
    upsampled = nn.functional.interpolate(pixel_values, size=(height, width), mode=mode, align_corners=False)
    return upsampled


# refactored from original implementation
def dice_loss(inputs: Tensor, labels: Tensor, num_masks: int) -> Tensor:
    r"""
    Compute the DICE loss, similar to generalized IOU for masks, as follows:

    $$ \mathcal{L}_{\text{dice}}(x, y) = 1 - \frac{2 * x \cap y}{x \cup y + 1} $$

    In practice, since `labels` is a binary mask (only 0s and 1s), dice can be computed as follows:

    $$ \mathcal{L}_{\text{dice}}(x, y) = 1 - \frac{2 * x * y}{x + y + 1} $$

    Args:
        inputs (`torch.Tensor`):
            A tensor representing a mask.
        labels (`torch.Tensor`):
            A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs
            (0 for the negative class and 1 for the positive class).
        num_masks (`int`):
            The number of masks present in the current batch, used for normalization.

    Returns:
        `torch.Tensor`: The computed loss.
    """
    probs = inputs.sigmoid().flatten(1)
    numerator = 2 * (probs * labels).sum(-1)
    denominator = probs.sum(-1) + labels.sum(-1)
    loss = 1 - (numerator + 1) / (denominator + 1)
    loss = loss.sum() / num_masks
    return loss


# refactored from original implementation
def sigmoid_focal_loss(
    inputs: Tensor, labels: Tensor, num_masks: int, alpha: float = 0.25, gamma: float = 2
) -> Tensor:
    r"""
    Focal loss proposed in [Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002) originally used
    in RetinaNet.
    The loss is computed as follows:

    $$ \mathcal{L}_{\text{focal loss}} = -(1 - p_t)^{\gamma}\log{(p_t)} $$

    where \\(p_t\\) is the model's estimated probability for the target class and \\(CE(p_t) = -\log{(p_t)}\\) is the
    standard cross entropy loss. Please refer to equations (1), (2) and (3) of the paper for a better understanding.

    Args:
        inputs (`torch.Tensor`):
            A float tensor of arbitrary shape.
        labels (`torch.Tensor`):
            A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs
            (0 for the negative class and 1 for the positive class).
        num_masks (`int`):
            The number of masks present in the current batch, used for normalization.
        alpha (float, *optional*, defaults to 0.25):
            Weighting factor in range (0,1) to balance positive vs negative examples.
        gamma (float, *optional*, defaults to 2.0):
            Exponent of the modulating factor \\(1 - p_t\\) to balance easy vs hard examples.

    Returns:
        `torch.Tensor`: The computed loss.
    """
    criterion = nn.BCEWithLogitsLoss(reduction="none")
    probs = inputs.sigmoid()
    cross_entropy_loss = criterion(inputs, labels)
    p_t = probs * labels + (1 - probs) * (1 - labels)
    loss = cross_entropy_loss * ((1 - p_t) ** gamma)

    if alpha >= 0:
        alpha_t = alpha * labels + (1 - alpha) * (1 - labels)
        loss = alpha_t * loss

    loss = loss.mean(1).sum() / num_masks
    return loss


# refactored from original implementation
def pair_wise_dice_loss(inputs: Tensor, labels: Tensor) -> Tensor:
    """
    A pairwise version of the dice loss, see `dice_loss` for usage.

    Args:
        inputs (`torch.Tensor`):
            A tensor representing a mask.
        labels (`torch.Tensor`):
            A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs
            (0 for the negative class and 1 for the positive class).

    Returns:
        `torch.Tensor`: The computed loss between each pair.
    """
    inputs = inputs.sigmoid().flatten(1)
    numerator = 2 * torch.matmul(inputs, labels.T)
    # using broadcasting to get a [num_queries, NUM_CLASSES] matrix
    denominator = inputs.sum(-1)[:, None] + labels.sum(-1)[None, :]
    loss = 1 - (numerator + 1) / (denominator + 1)
    return loss


# refactored from original implementation
def pair_wise_sigmoid_focal_loss(inputs: Tensor, labels: Tensor, alpha: float = 0.25, gamma: float = 2.0) -> Tensor:
    r"""
    A pairwise version of the focal loss, see `sigmoid_focal_loss` for usage.

    Args:
        inputs (`torch.Tensor`):
            A tensor representing a mask.
        labels (`torch.Tensor`):
            A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs
            (0 for the negative class and 1 for the positive class).
        alpha (float, *optional*, defaults to 0.25):
            Weighting factor in range (0,1) to balance positive vs negative examples.
        gamma (float, *optional*, defaults to 2.0):
            Exponent of the modulating factor \\(1 - p_t\\) to balance easy vs hard examples.

    Returns:
        `torch.Tensor`: The computed loss between each pair.
""" if alpha < 0: raise ValueError("alpha must be positive") height_and_width = inputs.shape[1] criterion = nn.BCEWithLogitsLoss(reduction="none") prob = inputs.sigmoid() cross_entropy_loss_pos = criterion(inputs, torch.ones_like(inputs)) focal_pos = ((1 - prob) ** gamma) * cross_entropy_loss_pos focal_pos *= alpha cross_entropy_loss_neg = criterion(inputs, torch.zeros_like(inputs)) focal_neg = (prob**gamma) * cross_entropy_loss_neg focal_neg *= 1 - alpha loss = torch.matmul(focal_pos, labels.T) + torch.matmul(focal_neg, (1 - labels).T) return loss / height_and_width # Copied from transformers.models.detr.modeling_detr.DetrAttention class DetrAttention(nn.Module): """ Multi-headed attention from 'Attention Is All You Need' paper. Here, we add position embeddings to the queries and keys (as explained in the DETR paper). """ def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {num_heads})." ) self.scaling = self.head_dim**-0.5 self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def with_pos_embed(self, tensor: torch.Tensor, object_queries: Optional[Tensor]): return tensor if object_queries is None else tensor + object_queries def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, object_queries: Optional[torch.Tensor] = None, key_value_states: Optional[torch.Tensor] = None, spatial_position_embeddings: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None batch_size, target_len, embed_dim = hidden_states.size() # add position embeddings to the hidden states before projecting to queries and keys if object_queries is not None: hidden_states_original = hidden_states hidden_states = self.with_pos_embed(hidden_states, object_queries) # add key-value position embeddings to the key value states if spatial_position_embeddings is not None: key_value_states_original = key_value_states key_value_states = self.with_pos_embed(key_value_states, spatial_position_embeddings) # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, batch_size) value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, batch_size) value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size) proj_shape = (batch_size * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape) key_states = 
key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) source_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len): raise ValueError( f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (batch_size, 1, target_len, source_len): raise ValueError( f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is" f" {attention_mask.size()}" ) attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(batch_size, target_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped # Copied from transformers.models.detr.modeling_detr.DetrDecoderLayer class DetrDecoderLayer(nn.Module): def __init__(self, config: DetrConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = DetrAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = DetrAttention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, object_queries: Optional[torch.Tensor] = None, query_position_embeddings: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. 
object_queries (`torch.FloatTensor`, *optional*): object_queries that are added to the hidden states in the cross-attention layer. query_position_embeddings (`torch.FloatTensor`, *optional*): position embeddings that are added to the queries and keys in the self-attention layer. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, object_queries=query_position_embeddings, attention_mask=attention_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Cross-Attention Block cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=hidden_states, object_queries=query_position_embeddings, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, spatial_position_embeddings=object_queries, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs class DetrDecoder(nn.Module): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DetrDecoderLayer`]. The decoder updates the query embeddings through multiple self-attention and cross-attention layers. Some small tweaks for DETR: - object_queries and query_position_embeddings are added to the forward pass. - if self.config.auxiliary_loss is set to True, also returns a stack of activations from all decoding layers. 
Args: config: DetrConfig """ def __init__(self, config: DetrConfig): super().__init__() self.config = config self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.layers = nn.ModuleList([DetrDecoderLayer(config) for _ in range(config.decoder_layers)]) # in DETR, the decoder uses layernorm after the last decoder layer output self.layernorm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False def forward( self, inputs_embeds=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, object_queries=None, query_position_embeddings=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): The query embeddings that are passed into the decoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on certain queries. Mask values selected in `[0, 1]`: - 1 for queries that are **not masked**, - 0 for queries that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Position embeddings that are added to the queries and keys in each cross-attention layer. query_position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Position embeddings that are added to the queries and keys in each self-attention layer. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is not None: hidden_states = inputs_embeds input_shape = inputs_embeds.size()[:-1] # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) # optional intermediate hidden states intermediate = () if self.config.auxiliary_loss else None # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, None, encoder_hidden_states, encoder_attention_mask, None, output_attentions, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=None, object_queries=object_queries, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if self.config.auxiliary_loss: hidden_states = self.layernorm(hidden_states) intermediate += (hidden_states,) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # finally, apply layernorm hidden_states = self.layernorm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) # stack intermediate decoder activations if self.config.auxiliary_loss: intermediate = torch.stack(intermediate) if not return_dict: return tuple( v for v in [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions, intermediate] if v is not None ) return DetrDecoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, intermediate_hidden_states=intermediate, ) # refactored from original implementation class MaskFormerHungarianMatcher(nn.Module): """This class computes an assignment between the labels and the predictions of the network. For efficiency reasons, the labels don't include the no_object. Because of this, in general, there are more predictions than labels. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). """ def __init__(self, cost_class: float = 1.0, cost_mask: float = 1.0, cost_dice: float = 1.0): """Creates the matcher Params: cost_class (float, *optional*, defaults to 1.0): This is the relative weight of the classification error in the matching cost. 
cost_mask (float, *optional*, defaults to 1.0): This is the relative weight of the focal loss of the binary mask in the matching cost. cost_dice (float, *optional*, defaults to 1.0): This is the relative weight of the dice loss of the binary mask in the matching cost. """ super().__init__() if cost_class == 0 and cost_mask == 0 and cost_dice == 0: raise ValueError("All costs can't be 0") self.cost_class = cost_class self.cost_mask = cost_mask self.cost_dice = cost_dice @torch.no_grad() def forward(self, masks_queries_logits, class_queries_logits, mask_labels, class_labels) -> List[Tuple[Tensor]]: """Performs the matching Params: masks_queries_logits (`torch.Tensor`): A tensor of dim `batch_size, num_queries, height, width` with the predicted masks. class_queries_logits (`torch.Tensor`): A tensor of dim `batch_size, num_queries, num_labels` with the classification logits. class_labels (`torch.Tensor`): A tensor of dim `num_target_boxes` (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels. mask_labels (`torch.Tensor`): A tensor of dim `num_target_boxes, height, width` containing the target masks. Returns: `List[Tuple[Tensor]]`: A list of size batch_size, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected labels (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes). """ indices: List[Tuple[np.array]] = [] preds_masks = masks_queries_logits preds_probs = class_queries_logits # iterate through batch size for pred_probs, pred_mask, target_mask, labels in zip(preds_probs, preds_masks, mask_labels, class_labels): # downsample the target mask, save memory target_mask = nn.functional.interpolate(target_mask[:, None], size=pred_mask.shape[-2:], mode="nearest") pred_probs = pred_probs.softmax(-1) # Compute the classification cost. Contrary to the loss, we don't use the NLL, # but approximate it by 1 - proba[target class]. # The 1 is a constant that doesn't change the matching, so it can be omitted.
cost_class = -pred_probs[:, labels] # flatten spatial dimension "q h w -> q (h w)" pred_mask_flat = pred_mask.flatten(1) # [num_queries, height*width] # same for target_mask "c h w -> c (h w)" target_mask_flat = target_mask[:, 0].flatten(1) # [num_total_labels, height*width] # compute the focal loss between each pair of masks -> shape (num_queries, num_labels) cost_mask = pair_wise_sigmoid_focal_loss(pred_mask_flat, target_mask_flat) # Compute the dice loss between each pair of masks -> shape (num_queries, num_labels) cost_dice = pair_wise_dice_loss(pred_mask_flat, target_mask_flat) # final cost matrix cost_matrix = self.cost_mask * cost_mask + self.cost_class * cost_class + self.cost_dice * cost_dice # do the assignment using the Hungarian algorithm in scipy assigned_indices: Tuple[np.array] = linear_sum_assignment(cost_matrix.cpu()) indices.append(assigned_indices) # It could be stacked in one tensor matched_indices = [ (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices ] return matched_indices def __repr__(self): head = "Matcher " + self.__class__.__name__ body = [ f"cost_class: {self.cost_class}", f"cost_mask: {self.cost_mask}", f"cost_dice: {self.cost_dice}", ] _repr_indent = 4 lines = [head] + [" " * _repr_indent + line for line in body] return "\n".join(lines) # copied and adapted from original implementation class MaskFormerLoss(nn.Module): def __init__( self, num_labels: int, matcher: MaskFormerHungarianMatcher, weight_dict: Dict[str, float], eos_coef: float, ): """ The MaskFormer Loss. The loss is computed very similarly to DETR. The process happens in two steps: 1) we compute the Hungarian assignment between ground truth masks and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and mask) Args: num_labels (`int`): The number of classes. matcher (`MaskFormerHungarianMatcher`): A torch module that computes the assignments between the predictions and labels. weight_dict (`Dict[str, float]`): A dictionary of weights to be applied to the different losses. eos_coef (`float`): Weight to apply to the null class.
""" super().__init__() requires_backends(self, ["scipy"]) self.num_labels = num_labels self.matcher = matcher self.weight_dict = weight_dict self.eos_coef = eos_coef empty_weight = torch.ones(self.num_labels + 1) empty_weight[-1] = self.eos_coef self.register_buffer("empty_weight", empty_weight) def _max_by_axis(self, the_list: List[List[int]]) -> List[int]: maxes = the_list[0] for sublist in the_list[1:]: for index, item in enumerate(sublist): maxes[index] = max(maxes[index], item) return maxes def _pad_images_to_max_in_batch(self, tensors: List[Tensor]) -> Tuple[Tensor, Tensor]: # get the maximum size in the batch max_size = self._max_by_axis([list(tensor.shape) for tensor in tensors]) batch_size = len(tensors) # compute finel size batch_shape = [batch_size] + max_size b, _, h, w = batch_shape # get metadata dtype = tensors[0].dtype device = tensors[0].device padded_tensors = torch.zeros(batch_shape, dtype=dtype, device=device) padding_masks = torch.ones((b, h, w), dtype=torch.bool, device=device) # pad the tensors to the size of the biggest one for tensor, padded_tensor, padding_mask in zip(tensors, padded_tensors, padding_masks): padded_tensor[: tensor.shape[0], : tensor.shape[1], : tensor.shape[2]].copy_(tensor) padding_mask[: tensor.shape[1], : tensor.shape[2]] = False return padded_tensors, padding_masks def loss_labels( self, class_queries_logits: Tensor, class_labels: List[Tensor], indices: Tuple[np.array] ) -> Dict[str, Tensor]: """Compute the losses related to the labels using cross entropy. Args: class_queries_logits (`torch.Tensor`): A tensor of shape `batch_size, num_queries, num_labels` class_labels (`List[torch.Tensor]`): List of class labels of shape `(labels)`. indices (`Tuple[np.array])`: The indices computed by the Hungarian matcher. Returns: `Dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key: - **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels. """ pred_logits = class_queries_logits batch_size, num_queries, _ = pred_logits.shape criterion = nn.CrossEntropyLoss(weight=self.empty_weight) idx = self._get_predictions_permutation_indices(indices) # shape = (batch_size, num_queries) target_classes_o = torch.cat([target[j] for target, (_, j) in zip(class_labels, indices)]) # shape = (batch_size, num_queries) target_classes = torch.full( (batch_size, num_queries), fill_value=self.num_labels, dtype=torch.int64, device=pred_logits.device ) target_classes[idx] = target_classes_o # target_classes is a (batch_size, num_labels, num_queries), we need to permute pred_logits "b q c -> b c q" pred_logits_transposed = pred_logits.transpose(1, 2) loss_ce = criterion(pred_logits_transposed, target_classes) losses = {"loss_cross_entropy": loss_ce} return losses def loss_masks( self, masks_queries_logits: Tensor, mask_labels: List[Tensor], indices: Tuple[np.array], num_masks: int ) -> Dict[str, Tensor]: """Compute the losses related to the masks using focal and dice loss. Args: masks_queries_logits (`torch.Tensor`): A tensor of shape `batch_size, num_queries, height, width` mask_labels (`torch.Tensor`): List of mask labels of shape `(labels, height, width)`. indices (`Tuple[np.array])`: The indices computed by the Hungarian matcher. num_masks (`int)`: The number of masks, used for normalization. Returns: `Dict[str, Tensor]`: A dict of `torch.Tensor` containing two keys: - **loss_mask** -- The loss computed using sigmoid focal loss on the predicted and ground truth masks. 
- **loss_dice** -- The loss computed using dice loss on the predicted and ground truth masks. """ src_idx = self._get_predictions_permutation_indices(indices) tgt_idx = self._get_targets_permutation_indices(indices) # shape (batch_size * num_queries, height, width) pred_masks = masks_queries_logits[src_idx] # shape (batch_size, num_queries, height, width) # pad all and stack the targets to the num_labels dimension target_masks, _ = self._pad_images_to_max_in_batch(mask_labels) target_masks = target_masks[tgt_idx] # upsample predictions to the target size, we have to add one dim to use interpolate pred_masks = nn.functional.interpolate( pred_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False ) pred_masks = pred_masks[:, 0].flatten(1) target_masks = target_masks.flatten(1) losses = { "loss_mask": sigmoid_focal_loss(pred_masks, target_masks, num_masks), "loss_dice": dice_loss(pred_masks, target_masks, num_masks), } return losses def _get_predictions_permutation_indices(self, indices): # permute predictions following indices batch_indices = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)]) predictions_indices = torch.cat([src for (src, _) in indices]) return batch_indices, predictions_indices def _get_targets_permutation_indices(self, indices): # permute labels following indices batch_indices = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]) target_indices = torch.cat([tgt for (_, tgt) in indices]) return batch_indices, target_indices def forward( self, masks_queries_logits: Tensor, class_queries_logits: Tensor, mask_labels: List[Tensor], class_labels: List[Tensor], auxiliary_predictions: Optional[Dict[str, Tensor]] = None, ) -> Dict[str, Tensor]: """ This performs the loss computation. Args: masks_queries_logits (`torch.Tensor`): A tensor of shape `batch_size, num_queries, height, width` class_queries_logits (`torch.Tensor`): A tensor of shape `batch_size, num_queries, num_labels` mask_labels (`torch.Tensor`): List of mask labels of shape `(labels, height, width)`. class_labels (`List[torch.Tensor]`): List of class labels of shape `(labels)`. auxiliary_predictions (`Dict[str, torch.Tensor]`, *optional*): if `use_auxiliary_loss` was set to `true` in [`MaskFormerConfig`], then it contains the logits from the inner layers of the DETR decoder. Returns: `Dict[str, Tensor]`: A dict of `torch.Tensor` containing three keys: - **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels. - **loss_mask** -- The loss computed using sigmoid focal loss on the predicted and ground truth masks. - **loss_dice** -- The loss computed using dice loss on the predicted and ground truth masks. if `use_auxiliary_loss` was set to `true` in [`MaskFormerConfig`], the dictionary contains additional losses for each auxiliary prediction. """ # retrieve the matching between the outputs of the last layer and the labels indices = self.matcher(masks_queries_logits, class_queries_logits, mask_labels, class_labels) # compute the average number of target masks for normalization purposes num_masks: Number = self.get_num_masks(class_labels, device=class_labels[0].device) # get all the losses losses: Dict[str, Tensor] = { **self.loss_masks(masks_queries_logits, mask_labels, indices, num_masks), **self.loss_labels(class_queries_logits, class_labels, indices), } # in case of auxiliary losses, we repeat this process with the output of each intermediate layer.
if auxiliary_predictions is not None: for idx, aux_outputs in enumerate(auxiliary_predictions): masks_queries_logits = aux_outputs["masks_queries_logits"] class_queries_logits = aux_outputs["class_queries_logits"] loss_dict = self.forward(masks_queries_logits, class_queries_logits, mask_labels, class_labels) loss_dict = {f"{key}_{idx}": value for key, value in loss_dict.items()} losses.update(loss_dict) return losses def get_num_masks(self, class_labels: torch.Tensor, device: torch.device) -> torch.Tensor: """ Computes the average number of target masks across the batch, for normalization purposes. """ num_masks = sum([len(classes) for classes in class_labels]) num_masks = torch.as_tensor(num_masks, dtype=torch.float, device=device) world_size = 1 if is_accelerate_available(): if PartialState._shared_state != {}: num_masks = reduce(num_masks) world_size = PartialState().num_processes num_masks = torch.clamp(num_masks / world_size, min=1) return num_masks class MaskFormerFPNConvLayer(nn.Module): def __init__(self, in_features: int, out_features: int, kernel_size: int = 3, padding: int = 1): """ A basic module that executes conv - norm - activation in sequence, used in MaskFormer. Args: in_features (`int`): The number of input features (channels). out_features (`int`): The number of output features (channels). """ super().__init__() self.layers = [ nn.Conv2d(in_features, out_features, kernel_size=kernel_size, padding=padding, bias=False), nn.GroupNorm(32, out_features), nn.ReLU(inplace=True), ] for i, layer in enumerate(self.layers): # Provide backwards compatibility from when the class inherited from nn.Sequential # In nn.Sequential subclasses, the name given to the layer is its index in the sequence. # In nn.Module subclasses they are derived from the instance attribute they are assigned to e.g. # self.my_layer_name = Layer() # We can't give instance attributes integer names i.e. self.0 is not permitted and so need to register # explicitly self.add_module(str(i), layer) def forward(self, input: Tensor) -> Tensor: hidden_state = input for layer in self.layers: hidden_state = layer(hidden_state) return hidden_state class MaskFormerFPNLayer(nn.Module): def __init__(self, in_features: int, lateral_features: int): """ A Feature Pyramid Network (FPN) layer. It creates a feature map by aggregating features from the previous and backbone layer. Due to the spatial mismatch, the tensor coming from the previous layer is upsampled. Args: in_features (`int`): The number of input features (channels). lateral_features (`int`): The number of lateral features (channels). """ super().__init__() self.proj = nn.Sequential( nn.Conv2d(lateral_features, in_features, kernel_size=1, padding=0, bias=False), nn.GroupNorm(32, in_features), ) self.block = MaskFormerFPNConvLayer(in_features, in_features) def forward(self, down: Tensor, left: Tensor) -> Tensor: left = self.proj(left) down = nn.functional.interpolate(down, size=left.shape[-2:], mode="nearest") down += left down = self.block(down) return down class MaskFormerFPNModel(nn.Module): def __init__(self, in_features: int, lateral_widths: List[int], feature_size: int = 256): """ Feature Pyramid Network: given an input tensor and a set of feature maps of different feature/spatial sizes, it creates a list of feature maps with the same feature size. Args: in_features (`int`): The number of input features (channels). lateral_widths (`List[int]`): A list with the features (channels) size of each lateral connection.
feature_size (int, *optional*, defaults to 256): The features (channels) of the resulting feature maps. """ super().__init__() self.stem = MaskFormerFPNConvLayer(in_features, feature_size) self.layers = nn.Sequential( *[MaskFormerFPNLayer(feature_size, lateral_width) for lateral_width in lateral_widths[::-1]] ) def forward(self, features: List[Tensor]) -> List[Tensor]: fpn_features = [] last_feature = features[-1] other_features = features[:-1] output = self.stem(last_feature) for layer, left in zip(self.layers, other_features[::-1]): output = layer(output, left) fpn_features.append(output) return fpn_features class MaskFormerPixelDecoder(nn.Module): def __init__(self, *args, feature_size: int = 256, mask_feature_size: int = 256, **kwargs): r""" Pixel Decoder Module proposed in [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278). It first runs the backbone's features into a Feature Pyramid Network creating a list of feature maps. Then, it projects the last one to the correct `mask_size`. Args: feature_size (`int`, *optional*, defaults to 256): The feature size (channel dimension) of the FPN feature maps. mask_feature_size (`int`, *optional*, defaults to 256): The features (channels) of the target masks size \\(C_{\epsilon}\\) in the paper. """ super().__init__() self.fpn = MaskFormerFPNModel(*args, feature_size=feature_size, **kwargs) self.mask_projection = nn.Conv2d(feature_size, mask_feature_size, kernel_size=3, padding=1) def forward( self, features: List[Tensor], output_hidden_states: bool = False, return_dict: bool = True ) -> MaskFormerPixelDecoderOutput: fpn_features = self.fpn(features) # we use the last feature map last_feature_projected = self.mask_projection(fpn_features[-1]) if not return_dict: return (last_feature_projected, tuple(fpn_features)) if output_hidden_states else (last_feature_projected,) return MaskFormerPixelDecoderOutput( last_hidden_state=last_feature_projected, hidden_states=tuple(fpn_features) if output_hidden_states else () ) # copied and adapted from original implementation, also practically equal to DetrSinePositionEmbedding class MaskFormerSinePositionEmbedding(nn.Module): """ This is a more standard version of the position embedding, very similar to the one used by the Attention is all you need paper, generalized to work on images. 
""" def __init__( self, num_pos_feats: int = 64, temperature: int = 10000, normalize: bool = False, scale: Optional[float] = None ): super().__init__() if scale is not None and normalize is False: raise ValueError("normalize should be True if scale is passed") self.num_pos_feats = num_pos_feats self.temperature = temperature self.normalize = normalize self.scale = 2 * math.pi if scale is None else scale def forward(self, x: Tensor, mask: Optional[Tensor] = None) -> Tensor: if mask is None: mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool) not_mask = (~mask).to(x.dtype) y_embed = not_mask.cumsum(1) x_embed = not_mask.cumsum(2) if self.normalize: eps = 1e-6 y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale dim_t = torch.arange(self.num_pos_feats, dtype=torch.int64, device=x.device).type_as(x) dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.num_pos_feats) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos class PredictionBlock(nn.Module): def __init__(self, in_dim: int, out_dim: int, activation: nn.Module) -> None: super().__init__() self.layers = [nn.Linear(in_dim, out_dim), activation] # Maintain submodule indexing as if part of a Sequential block for i, layer in enumerate(self.layers): self.add_module(str(i), layer) def forward(self, input: Tensor) -> Tensor: hidden_state = input for layer in self.layers: hidden_state = layer(hidden_state) return hidden_state class MaskformerMLPPredictionHead(nn.Module): def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int = 3): """ A classic Multi Layer Perceptron (MLP). Args: input_dim (`int`): The input dimensions. hidden_dim (`int`): The hidden dimensions. output_dim (`int`): The output dimensions. num_layers (int, *optional*, defaults to 3): The number of layers. """ super().__init__() in_dims = [input_dim] + [hidden_dim] * (num_layers - 1) out_dims = [hidden_dim] * (num_layers - 1) + [output_dim] self.layers = [] for i, (in_dim, out_dim) in enumerate(zip(in_dims, out_dims)): activation = nn.ReLU() if i < num_layers - 1 else nn.Identity() layer = PredictionBlock(in_dim, out_dim, activation=activation) self.layers.append(layer) # Provide backwards compatibility from when the class inherited from nn.Sequential # In nn.Sequential subclasses, the name given to the layer is its index in the sequence. # In nn.Module subclasses they derived from the instance attribute they are assigned to e.g. # self.my_layer_name = Layer() # We can't give instance attributes integer names i.e. self.0 is not permitted and so need to register # explicitly self.add_module(str(i), layer) def forward(self, input: Tensor) -> Tensor: hidden_state = input for layer in self.layers: hidden_state = layer(hidden_state) return hidden_state class MaskFormerPixelLevelModule(nn.Module): def __init__(self, config: MaskFormerConfig): """ Pixel Level Module proposed in [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278). It runs the input image through a backbone and a pixel decoder, generating an image feature map and pixel embeddings. 
Args: config ([`MaskFormerConfig`]): The configuration used to instantiate this model. """ super().__init__() if getattr(config, "backbone_config") is not None and config.backbone_config.model_type == "swin": # for backwards compatibility backbone_config = config.backbone_config backbone_config = MaskFormerSwinConfig.from_dict(backbone_config.to_dict()) backbone_config.out_features = ["stage1", "stage2", "stage3", "stage4"] config.backbone_config = backbone_config self.encoder = load_backbone(config) feature_channels = self.encoder.channels self.decoder = MaskFormerPixelDecoder( in_features=feature_channels[-1], feature_size=config.fpn_feature_size, mask_feature_size=config.mask_feature_size, lateral_widths=feature_channels[:-1], ) def forward( self, pixel_values: Tensor, output_hidden_states: bool = False, return_dict: bool = True ) -> MaskFormerPixelLevelModuleOutput: features = self.encoder(pixel_values).feature_maps decoder_output = self.decoder(features, output_hidden_states, return_dict=return_dict) if not return_dict: last_hidden_state = decoder_output[0] outputs = (features[-1], last_hidden_state) if output_hidden_states: hidden_states = decoder_output[1] outputs = outputs + (tuple(features),) + (hidden_states,) return outputs return MaskFormerPixelLevelModuleOutput( # the last feature is actually the output from the last layer encoder_last_hidden_state=features[-1], decoder_last_hidden_state=decoder_output.last_hidden_state, encoder_hidden_states=tuple(features) if output_hidden_states else (), decoder_hidden_states=decoder_output.hidden_states if output_hidden_states else (), ) class MaskFormerTransformerModule(nn.Module): """ The MaskFormer's transformer module. """ def __init__(self, in_features: int, config: MaskFormerConfig): super().__init__() hidden_size = config.decoder_config.hidden_size should_project = in_features != hidden_size self.position_embedder = MaskFormerSinePositionEmbedding(num_pos_feats=hidden_size // 2, normalize=True) self.queries_embedder = nn.Embedding(config.decoder_config.num_queries, hidden_size) self.input_projection = nn.Conv2d(in_features, hidden_size, kernel_size=1) if should_project else None self.decoder = DetrDecoder(config=config.decoder_config) def forward( self, image_features: Tensor, output_hidden_states: bool = False, output_attentions: bool = False, return_dict: Optional[bool] = None, ) -> DetrDecoderOutput: if self.input_projection is not None: image_features = self.input_projection(image_features) object_queries = self.position_embedder(image_features) # repeat the queries "q c -> b q c" batch_size = image_features.shape[0] queries_embeddings = self.queries_embedder.weight.unsqueeze(0).repeat(batch_size, 1, 1) inputs_embeds = torch.zeros_like(queries_embeddings, requires_grad=True) batch_size, num_channels, height, width = image_features.shape # rearrange both image_features and object_queries "b c h w -> b (h w) c" image_features = image_features.view(batch_size, num_channels, height * width).permute(0, 2, 1) object_queries = object_queries.view(batch_size, num_channels, height * width).permute(0, 2, 1) decoder_output: DetrDecoderOutput = self.decoder( inputs_embeds=inputs_embeds, attention_mask=None, encoder_hidden_states=image_features, encoder_attention_mask=None, object_queries=object_queries, query_position_embeddings=queries_embeddings, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return decoder_output MASKFORMER_START_DOCSTRING = r""" This model is a PyTorch 
[torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MaskFormerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ MASKFORMER_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MaskFormerImageProcessor.__call__`] for details. pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of Detr's decoder attention layers. return_dict (`bool`, *optional*): Whether or not to return a [`~MaskFormerModelOutput`] instead of a plain tuple. """ class MaskFormerPreTrainedModel(PreTrainedModel): config_class = MaskFormerConfig base_model_prefix = "model" main_input_name = "pixel_values" def _init_weights(self, module: nn.Module): xavier_std = self.config.init_xavier_std std = self.config.init_std if isinstance(module, MaskFormerTransformerModule): if module.input_projection is not None: nn.init.xavier_uniform_(module.input_projection.weight, gain=xavier_std) nn.init.constant_(module.input_projection.bias, 0) # FPN elif isinstance(module, MaskFormerFPNModel): nn.init.xavier_uniform_(module.stem.get_submodule("0").weight, gain=xavier_std) elif isinstance(module, MaskFormerFPNLayer): nn.init.xavier_uniform_(module.proj[0].weight, gain=xavier_std) elif isinstance(module, MaskFormerFPNConvLayer): nn.init.xavier_uniform_(module.get_submodule("0").weight, gain=xavier_std) # The MLP head elif isinstance(module, MaskformerMLPPredictionHead): # I was not able to find the correct initializer in the original implementation # we'll use xavier for submodule in module.modules(): if isinstance(submodule, nn.Linear): nn.init.xavier_uniform_(submodule.weight, gain=xavier_std) nn.init.constant_(submodule.bias, 0) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) # copied from DETR if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() @add_start_docstrings( "The bare MaskFormer Model outputting raw hidden-states without any specific head on top.", MASKFORMER_START_DOCSTRING, ) class MaskFormerModel(MaskFormerPreTrainedModel): def __init__(self, config: MaskFormerConfig): super().__init__(config) 
self.pixel_level_module = MaskFormerPixelLevelModule(config) self.transformer_module = MaskFormerTransformerModule( in_features=self.pixel_level_module.encoder.channels[-1], config=config ) self.post_init() @add_start_docstrings_to_model_forward(MASKFORMER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=MaskFormerModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Tensor, pixel_mask: Optional[Tensor] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> MaskFormerModelOutput: r""" Returns: Examples: ```python >>> from transformers import AutoImageProcessor, MaskFormerModel >>> from PIL import Image >>> import requests >>> # load MaskFormer fine-tuned on ADE20k semantic segmentation >>> image_processor = AutoImageProcessor.from_pretrained("facebook/maskformer-swin-base-ade") >>> model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-base-ade") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = image_processor(image, return_tensors="pt") >>> # forward pass >>> outputs = model(**inputs) >>> # the decoder of MaskFormer outputs hidden states of shape (batch_size, num_queries, hidden_size) >>> transformer_decoder_last_hidden_state = outputs.transformer_decoder_last_hidden_state >>> list(transformer_decoder_last_hidden_state.shape) [1, 100, 256] ```""" if pixel_values is None: raise ValueError("You have to specify pixel_values") output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, _, height, width = pixel_values.shape if pixel_mask is None: pixel_mask = torch.ones((batch_size, height, width), device=pixel_values.device) pixel_level_module_output = self.pixel_level_module( pixel_values, output_hidden_states, return_dict=return_dict ) image_features = pixel_level_module_output[0] pixel_embeddings = pixel_level_module_output[1] transformer_module_output = self.transformer_module(image_features, output_hidden_states, output_attentions) queries = transformer_module_output.last_hidden_state encoder_hidden_states = None pixel_decoder_hidden_states = None transformer_decoder_hidden_states = None hidden_states = None if output_hidden_states: encoder_hidden_states = pixel_level_module_output[2] pixel_decoder_hidden_states = pixel_level_module_output[3] transformer_decoder_hidden_states = transformer_module_output[1] hidden_states = encoder_hidden_states + pixel_decoder_hidden_states + transformer_decoder_hidden_states output = MaskFormerModelOutput( encoder_last_hidden_state=image_features, pixel_decoder_last_hidden_state=pixel_embeddings, transformer_decoder_last_hidden_state=queries, encoder_hidden_states=encoder_hidden_states, pixel_decoder_hidden_states=pixel_decoder_hidden_states, transformer_decoder_hidden_states=transformer_decoder_hidden_states, hidden_states=hidden_states, attentions=transformer_module_output.attentions, ) if not return_dict: output = tuple(v for v in output.values()) return output class MaskFormerForInstanceSegmentation(MaskFormerPreTrainedModel): def __init__(self, config: MaskFormerConfig): super().__init__(config) self.model = MaskFormerModel(config) hidden_size = config.decoder_config.hidden_size # 
+ 1 because we add the "null" class self.class_predictor = nn.Linear(hidden_size, config.num_labels + 1) self.mask_embedder = MaskformerMLPPredictionHead(hidden_size, hidden_size, config.mask_feature_size) self.matcher = MaskFormerHungarianMatcher( cost_class=1.0, cost_dice=config.dice_weight, cost_mask=config.mask_weight ) self.weight_dict: Dict[str, float] = { "loss_cross_entropy": config.cross_entropy_weight, "loss_mask": config.mask_weight, "loss_dice": config.dice_weight, } self.criterion = MaskFormerLoss( config.num_labels, matcher=self.matcher, weight_dict=self.weight_dict, eos_coef=config.no_object_weight, ) self.post_init() def get_loss_dict( self, masks_queries_logits: Tensor, class_queries_logits: Tensor, mask_labels: Tensor, class_labels: Tensor, auxiliary_logits: Dict[str, Tensor], ) -> Dict[str, Tensor]: loss_dict: Dict[str, Tensor] = self.criterion( masks_queries_logits, class_queries_logits, mask_labels, class_labels, auxiliary_logits ) # weight each loss by `self.weight_dict[<LOSS_NAME>]` including auxiliary losses for key, weight in self.weight_dict.items(): for loss_key, loss in loss_dict.items(): if key in loss_key: loss *= weight return loss_dict def get_loss(self, loss_dict: Dict[str, Tensor]) -> Tensor: return sum(loss_dict.values()) def get_logits(self, outputs: MaskFormerModelOutput) -> Tuple[Tensor, Tensor, List[Dict[str, Tensor]]]: pixel_embeddings = outputs.pixel_decoder_last_hidden_state # get the auxiliary predictions (one for each decoder's layer) auxiliary_logits: List[Dict[str, Tensor]] = [] is_tracing = torch.jit.is_tracing() or isinstance(outputs, torch.fx.Proxy) or is_torchdynamo_compiling() # This code is a little cumbersome; an improvement would be to return a list of predictions. If we have the auxiliary loss, we return more than one element in the list if self.config.use_auxiliary_loss: stacked_transformer_decoder_outputs = torch.stack(outputs.transformer_decoder_hidden_states) classes = self.class_predictor(stacked_transformer_decoder_outputs) class_queries_logits = classes[-1] # get the masks mask_embeddings = self.mask_embedder(stacked_transformer_decoder_outputs) if is_tracing and not is_torch_greater_or_equal_than_2_1: # Equivalent to einsum('lbqc, bchw -> lbqhw') but jit friendly num_embeddings, batch_size, num_queries, num_channels = mask_embeddings.shape _, _, height, width = pixel_embeddings.shape binaries_masks = torch.zeros( (num_embeddings, batch_size, num_queries, height, width), device=mask_embeddings.device ) for c in range(num_channels): binaries_masks += mask_embeddings[..., c][..., None, None] * pixel_embeddings[None, :, None, c] else: binaries_masks = torch.einsum("lbqc, bchw -> lbqhw", mask_embeddings, pixel_embeddings) masks_queries_logits = binaries_masks[-1] # go till [:-1] because the last one is always used for aux_binary_masks, aux_classes in zip(binaries_masks[:-1], classes[:-1]): auxiliary_logits.append( {"masks_queries_logits": aux_binary_masks, "class_queries_logits": aux_classes} ) else: transformer_decoder_hidden_states = outputs.transformer_decoder_last_hidden_state classes = self.class_predictor(transformer_decoder_hidden_states) class_queries_logits = classes # get the masks mask_embeddings = self.mask_embedder(transformer_decoder_hidden_states) # sum up over the channels if is_tracing and not is_torch_greater_or_equal_than_2_1: # Equivalent to einsum('bqc, bchw -> bqhw') but jit friendly batch_size, num_queries, num_channels = mask_embeddings.shape _, _, height, width = pixel_embeddings.shape masks_queries_logits = torch.zeros( (batch_size, num_queries, height, width), device=mask_embeddings.device ) for c in range(num_channels): masks_queries_logits += mask_embeddings[..., c][..., None, None] * pixel_embeddings[:, None, c] else: masks_queries_logits = torch.einsum("bqc, bchw -> bqhw", mask_embeddings, pixel_embeddings) return class_queries_logits, masks_queries_logits, auxiliary_logits @add_start_docstrings_to_model_forward(MASKFORMER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=MaskFormerForInstanceSegmentationOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Tensor, mask_labels: Optional[List[Tensor]] = None, class_labels: Optional[List[Tensor]] = None, pixel_mask: Optional[Tensor] = None, output_auxiliary_logits: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> MaskFormerForInstanceSegmentationOutput: r""" mask_labels (`List[torch.Tensor]`, *optional*): List of mask labels of shape `(num_labels, height, width)` to be fed to a model. class_labels (`List[torch.LongTensor]`, *optional*): list of target class labels of shape `(num_labels,)` to be fed to a model. They identify the labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` is `class_labels[i][j]`. Returns: Examples: Semantic segmentation example: ```python >>> from transformers import AutoImageProcessor, MaskFormerForInstanceSegmentation >>> from PIL import Image >>> import requests >>> # load MaskFormer fine-tuned on ADE20k semantic segmentation >>> image_processor = AutoImageProcessor.from_pretrained("facebook/maskformer-swin-base-ade") >>> model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-base-ade") >>> url = ( ... "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg" ... ) >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> # model predicts class_queries_logits of shape `(batch_size, num_queries, num_labels + 1)` >>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)` >>> class_queries_logits = outputs.class_queries_logits >>> masks_queries_logits = outputs.masks_queries_logits >>> # you can pass them to image_processor for postprocessing >>> predicted_semantic_map = image_processor.post_process_semantic_segmentation( ... outputs, target_sizes=[image.size[::-1]] ... 
)[0] >>> # we refer to the demo notebooks for visualization (see "Resources" section in the MaskFormer docs) >>> list(predicted_semantic_map.shape) [512, 683] ``` Panoptic segmentation example: ```python >>> from transformers import AutoImageProcessor, MaskFormerForInstanceSegmentation >>> from PIL import Image >>> import requests >>> # load MaskFormer fine-tuned on COCO panoptic segmentation >>> image_processor = AutoImageProcessor.from_pretrained("facebook/maskformer-swin-base-coco") >>> model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-base-coco") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> # model predicts class_queries_logits of shape `(batch_size, num_queries, num_labels + 1)` >>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)` >>> class_queries_logits = outputs.class_queries_logits >>> masks_queries_logits = outputs.masks_queries_logits >>> # you can pass them to image_processor for postprocessing >>> result = image_processor.post_process_panoptic_segmentation(outputs, target_sizes=[image.size[::-1]])[0] >>> # we refer to the demo notebooks for visualization (see "Resources" section in the MaskFormer docs) >>> predicted_panoptic_map = result["segmentation"] >>> list(predicted_panoptic_map.shape) [480, 640] ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict raw_outputs = self.model( pixel_values, pixel_mask, output_hidden_states=output_hidden_states or self.config.use_auxiliary_loss, return_dict=return_dict, output_attentions=output_attentions, ) # We need to have raw_outputs optionally be returned as a dict to use torch.compile. 
For backwards # compatibility we convert to a dataclass for the rest of the model logic outputs = MaskFormerModelOutput( encoder_last_hidden_state=raw_outputs[0], pixel_decoder_last_hidden_state=raw_outputs[1], transformer_decoder_last_hidden_state=raw_outputs[2], encoder_hidden_states=raw_outputs[3] if output_hidden_states else None, pixel_decoder_hidden_states=raw_outputs[4] if output_hidden_states else None, transformer_decoder_hidden_states=raw_outputs[5] if output_hidden_states else None, hidden_states=raw_outputs[6] if output_hidden_states else None, attentions=raw_outputs[-1] if output_attentions else None, ) loss, loss_dict, auxiliary_logits = None, None, None class_queries_logits, masks_queries_logits, auxiliary_logits = self.get_logits(outputs) if mask_labels is not None and class_labels is not None: loss_dict: Dict[str, Tensor] = self.get_loss_dict( masks_queries_logits, class_queries_logits, mask_labels, class_labels, auxiliary_logits ) loss = self.get_loss(loss_dict) output_auxiliary_logits = ( self.config.output_auxiliary_logits if output_auxiliary_logits is None else output_auxiliary_logits ) if not output_auxiliary_logits: auxiliary_logits = None if not return_dict: output = tuple( v for v in (loss, class_queries_logits, masks_queries_logits, auxiliary_logits, *outputs.values()) if v is not None ) return output return MaskFormerForInstanceSegmentationOutput( loss=loss, **outputs, class_queries_logits=class_queries_logits, masks_queries_logits=masks_queries_logits, auxiliary_logits=auxiliary_logits, )
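# --- Editor's note: the sketch below is an addition, not part of the original file. ---
# `get_logits` above combines per-query mask embeddings with per-pixel embeddings via
# einsum("bqc, bchw -> bqhw"), and falls back to an explicit channel loop when tracing.
# This minimal, self-contained check shows on random tensors that the two paths agree;
# all shapes here are illustrative assumptions, not values from any checkpoint.
import torch

batch_size, num_queries, num_channels, height, width = 2, 5, 8, 4, 4
mask_embeddings = torch.randn(batch_size, num_queries, num_channels)
pixel_embeddings = torch.randn(batch_size, num_channels, height, width)

# einsum path: contract over the shared channel dimension c
masks_einsum = torch.einsum("bqc, bchw -> bqhw", mask_embeddings, pixel_embeddings)

# loop path (the jit-friendly fallback): accumulate one channel at a time
masks_loop = torch.zeros(batch_size, num_queries, height, width)
for c in range(num_channels):
    masks_loop += mask_embeddings[..., c][..., None, None] * pixel_embeddings[:, None, c]

assert torch.allclose(masks_einsum, masks_loop, atol=1e-5)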
transformers/src/transformers/models/maskformer/modeling_maskformer.py/0
{ "file_path": "transformers/src/transformers/models/maskformer/modeling_maskformer.py", "repo_id": "transformers", "token_count": 37600 }
376
# coding=utf-8 # Copyright 2023 Mixtral AI and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mixtral model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class MixtralConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MixtralModel`]. It is used to instantiate a Mixtral model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Mixtral-8x7B-v0.1 or Mixtral-8x7B-Instruct-v0.1. [mistralai/Mixtral-8x7B-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1) [mistralai/Mixtral-8x7B-Instruct-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32000): Vocabulary size of the Mixtral model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MixtralModel`] hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 14336): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer encoder. num_key_value_heads (`int`, *optional*, defaults to 8): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details check out [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to `4096*32`): The maximum sequence length that this model might ever be used with. Mixtral's sliding window attention allows sequences of up to 4096*32 tokens. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*): The id of the padding token. bos_token_id (`int`, *optional*, defaults to 1): The id of the "beginning-of-sequence" token. eos_token_id (`int`, *optional*, defaults to 2): The id of the "end-of-sequence" token. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied. rope_theta (`float`, *optional*, defaults to 1000000.0): The base period of the RoPE embeddings. sliding_window (`int`, *optional*): Sliding window attention window size. If not specified, will default to `4096`. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. num_experts_per_tok (`int`, *optional*, defaults to 2): The number of experts to route per-token, can also be interpreted as the `top-k` routing parameter num_local_experts (`int`, *optional*, defaults to 8): Number of experts per Sparse MLP layer. output_router_logits (`bool`, *optional*, defaults to `False`): Whether or not the router logits should be returned by the model. Enabling this will also allow the model to output the auxiliary loss. See [here]() for more details router_aux_loss_coef (`float`, *optional*, defaults to 0.001): The aux loss factor for the total loss. router_jitter_noise (`float`, *optional*, defaults to 0.0): Amount of noise to add to the router. ```python >>> from transformers import MixtralModel, MixtralConfig >>> # Initializing a Mixtral 8x7B style configuration >>> configuration = MixtralConfig() >>> # Initializing a model from the Mixtral 8x7B style configuration >>> model = MixtralModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mixtral" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, vocab_size=32000, hidden_size=4096, intermediate_size=14336, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=8, hidden_act="silu", max_position_embeddings=4096 * 32, initializer_range=0.02, rms_norm_eps=1e-5, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, rope_theta=1e6, sliding_window=None, attention_dropout=0.0, num_experts_per_tok=2, num_local_experts=8, output_router_logits=False, router_aux_loss_coef=0.001, router_jitter_noise=0.0, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.sliding_window = sliding_window # for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.attention_dropout = attention_dropout self.num_experts_per_tok = num_experts_per_tok self.num_local_experts = num_local_experts self.output_router_logits = output_router_logits self.router_aux_loss_coef = router_aux_loss_coef self.router_jitter_noise = router_jitter_noise super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )
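# --- Editor's note: the sketch below is an addition, not part of the original file. ---
# The `num_key_value_heads` docstring above describes converting a multi-head (MHA)
# checkpoint to grouped-query attention (GQA) by mean-pooling the original key/value
# heads of each group. This self-contained snippet illustrates that operation; the head
# counts mirror the Mixtral defaults (32 attention heads, 8 KV heads), while `head_dim`
# is an arbitrary illustrative choice.
import torch

num_attention_heads, num_key_value_heads, head_dim = 32, 8, 64
group_size = num_attention_heads // num_key_value_heads  # 4 query heads share one KV head

# a stand-in per-head key projection, one row of weights per head
k_heads = torch.randn(num_attention_heads, head_dim)

# mean-pool every `group_size` consecutive heads into a single grouped KV head
k_gqa = k_heads.view(num_key_value_heads, group_size, head_dim).mean(dim=1)
print(k_gqa.shape)  # torch.Size([8, 64])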
transformers/src/transformers/models/mixtral/configuration_mixtral.py/0
{ "file_path": "transformers/src/transformers/models/mixtral/configuration_mixtral.py", "repo_id": "transformers", "token_count": 3175 }
377
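The `num_key_value_heads` docstring above says a multi-head checkpoint is converted to a GQA checkpoint by mean-pooling the original heads of each group. Below is a minimal PyTorch sketch of that conversion with Mixtral-like sizes; the weight layout and tensor names are illustrative assumptions, not the actual checkpoint format:

```python
import torch

# Hypothetical MHA key-projection weight: (num_heads * head_dim, hidden_size).
num_attention_heads, num_key_value_heads, head_dim, hidden_size = 32, 8, 128, 4096
group_size = num_attention_heads // num_key_value_heads  # heads pooled into one KV head

k_proj_mha = torch.randn(num_attention_heads * head_dim, hidden_size)

# Split into per-head blocks, then mean-pool each contiguous group of heads.
k_heads = k_proj_mha.view(num_attention_heads, head_dim, hidden_size)
k_gqa = k_heads.view(num_key_value_heads, group_size, head_dim, hidden_size).mean(dim=1)

k_proj_gqa = k_gqa.reshape(num_key_value_heads * head_dim, hidden_size)
print(k_proj_gqa.shape)  # torch.Size([1024, 4096])
```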
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for MobileNetV1.""" import warnings from ...utils import logging from .image_processing_mobilenet_v1 import MobileNetV1ImageProcessor logger = logging.get_logger(__name__) class MobileNetV1FeatureExtractor(MobileNetV1ImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( "The class MobileNetV1FeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use MobileNetV1ImageProcessor instead.", FutureWarning, ) super().__init__(*args, **kwargs)
transformers/src/transformers/models/mobilenet_v1/feature_extraction_mobilenet_v1.py/0
{ "file_path": "transformers/src/transformers/models/mobilenet_v1/feature_extraction_mobilenet_v1.py", "repo_id": "transformers", "token_count": 381 }
378
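The file above is a pure deprecation shim: the old feature-extractor name subclasses the new image processor and only adds a `FutureWarning` on construction. A self-contained sketch of the same pattern, using hypothetical class names rather than the actual transformers classes:

```python
import warnings


class NewImageProcessor:
    """Stand-in for the class that replaces the deprecated one."""

    def __init__(self, size=224):
        self.size = size


class OldFeatureExtractor(NewImageProcessor):
    """Deprecation shim: identical behavior, but warns on instantiation."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = OldFeatureExtractor(size=256)

# The shim still behaves exactly like the new class...
assert isinstance(extractor, NewImageProcessor) and extractor.size == 256
# ...and the deprecation is surfaced as a FutureWarning.
assert any(issubclass(w.category, FutureWarning) for w in caught)
```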
# coding=utf-8 # Copyright 2023 University of Wisconsin-Madison and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch MRA model.""" import math from pathlib import Path from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from torch.utils.cpp_extension import load from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_ninja_available, is_torch_cuda_available, logging, ) from .configuration_mra import MraConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "uw-madison/mra-base-512-4" _CONFIG_FOR_DOC = "MraConfig" _TOKENIZER_FOR_DOC = "AutoTokenizer" mra_cuda_kernel = None def load_cuda_kernels(): global mra_cuda_kernel src_folder = Path(__file__).resolve().parent.parent.parent / "kernels" / "mra" def append_root(files): return [src_folder / file for file in files] src_files = append_root(["cuda_kernel.cu", "cuda_launch.cu", "torch_extension.cpp"]) mra_cuda_kernel = load("cuda_kernel", src_files, verbose=True) def sparse_max(sparse_qk_prod, indices, query_num_block, key_num_block): """ Computes maximum values for softmax stability. """ if len(sparse_qk_prod.size()) != 4: raise ValueError("sparse_qk_prod must be a 4-dimensional tensor.") if len(indices.size()) != 2: raise ValueError("indices must be a 2-dimensional tensor.") if sparse_qk_prod.size(2) != 32: raise ValueError("The size of the second dimension of sparse_qk_prod must be 32.") if sparse_qk_prod.size(3) != 32: raise ValueError("The size of the third dimension of sparse_qk_prod must be 32.") index_vals = sparse_qk_prod.max(dim=-2).values.transpose(-1, -2) index_vals = index_vals.contiguous() indices = indices.int() indices = indices.contiguous() max_vals, max_vals_scatter = mra_cuda_kernel.index_max(index_vals, indices, query_num_block, key_num_block) max_vals_scatter = max_vals_scatter.transpose(-1, -2)[:, :, None, :] return max_vals, max_vals_scatter def sparse_mask(mask, indices, block_size=32): """ Converts attention mask to a sparse mask for high resolution logits. 
""" if len(mask.size()) != 2: raise ValueError("mask must be a 2-dimensional tensor.") if len(indices.size()) != 2: raise ValueError("indices must be a 2-dimensional tensor.") if mask.shape[0] != indices.shape[0]: raise ValueError("mask and indices must have the same size in the zero-th dimension.") batch_size, seq_len = mask.shape num_block = seq_len // block_size batch_idx = torch.arange(indices.size(0), dtype=torch.long, device=indices.device) mask = mask.reshape(batch_size, num_block, block_size) mask = mask[batch_idx[:, None], (indices % num_block).long(), :] return mask def mm_to_sparse(dense_query, dense_key, indices, block_size=32): """ Performs Sampled Dense Matrix Multiplication. """ batch_size, query_size, dim = dense_query.size() _, key_size, dim = dense_key.size() if query_size % block_size != 0: raise ValueError("query_size (size of first dimension of dense_query) must be divisible by block_size.") if key_size % block_size != 0: raise ValueError("key_size (size of first dimension of dense_key) must be divisible by block_size.") dense_query = dense_query.reshape(batch_size, query_size // block_size, block_size, dim).transpose(-1, -2) dense_key = dense_key.reshape(batch_size, key_size // block_size, block_size, dim).transpose(-1, -2) if len(dense_query.size()) != 4: raise ValueError("dense_query must be a 4-dimensional tensor.") if len(dense_key.size()) != 4: raise ValueError("dense_key must be a 4-dimensional tensor.") if len(indices.size()) != 2: raise ValueError("indices must be a 2-dimensional tensor.") if dense_query.size(3) != 32: raise ValueError("The third dimension of dense_query must be 32.") if dense_key.size(3) != 32: raise ValueError("The third dimension of dense_key must be 32.") dense_query = dense_query.contiguous() dense_key = dense_key.contiguous() indices = indices.int() indices = indices.contiguous() return mra_cuda_kernel.mm_to_sparse(dense_query, dense_key, indices.int()) def sparse_dense_mm(sparse_query, indices, dense_key, query_num_block, block_size=32): """ Performs matrix multiplication of a sparse matrix with a dense matrix. 
""" batch_size, key_size, dim = dense_key.size() if key_size % block_size != 0: raise ValueError("key_size (size of first dimension of dense_key) must be divisible by block_size.") if sparse_query.size(2) != block_size: raise ValueError("The size of the second dimension of sparse_query must be equal to the block_size.") if sparse_query.size(3) != block_size: raise ValueError("The size of the third dimension of sparse_query must be equal to the block_size.") dense_key = dense_key.reshape(batch_size, key_size // block_size, block_size, dim).transpose(-1, -2) if len(sparse_query.size()) != 4: raise ValueError("sparse_query must be a 4-dimensional tensor.") if len(dense_key.size()) != 4: raise ValueError("dense_key must be a 4-dimensional tensor.") if len(indices.size()) != 2: raise ValueError("indices must be a 2-dimensional tensor.") if dense_key.size(3) != 32: raise ValueError("The size of the third dimension of dense_key must be 32.") sparse_query = sparse_query.contiguous() indices = indices.int() indices = indices.contiguous() dense_key = dense_key.contiguous() dense_qk_prod = mra_cuda_kernel.sparse_dense_mm(sparse_query, indices, dense_key, query_num_block) dense_qk_prod = dense_qk_prod.transpose(-1, -2).reshape(batch_size, query_num_block * block_size, dim) return dense_qk_prod def transpose_indices(indices, dim_1_block, dim_2_block): return ((indices % dim_2_block) * dim_1_block + torch.div(indices, dim_2_block, rounding_mode="floor")).long() class MraSampledDenseMatMul(torch.autograd.Function): @staticmethod def forward(ctx, dense_query, dense_key, indices, block_size): sparse_qk_prod = mm_to_sparse(dense_query, dense_key, indices, block_size) ctx.save_for_backward(dense_query, dense_key, indices) ctx.block_size = block_size return sparse_qk_prod @staticmethod def backward(ctx, grad): dense_query, dense_key, indices = ctx.saved_tensors block_size = ctx.block_size query_num_block = dense_query.size(1) // block_size key_num_block = dense_key.size(1) // block_size indices_T = transpose_indices(indices, query_num_block, key_num_block) grad_key = sparse_dense_mm(grad.transpose(-1, -2), indices_T, dense_query, key_num_block) grad_query = sparse_dense_mm(grad, indices, dense_key, query_num_block) return grad_query, grad_key, None, None @staticmethod def operator_call(dense_query, dense_key, indices, block_size=32): return MraSampledDenseMatMul.apply(dense_query, dense_key, indices, block_size) class MraSparseDenseMatMul(torch.autograd.Function): @staticmethod def forward(ctx, sparse_query, indices, dense_key, query_num_block): sparse_qk_prod = sparse_dense_mm(sparse_query, indices, dense_key, query_num_block) ctx.save_for_backward(sparse_query, indices, dense_key) ctx.query_num_block = query_num_block return sparse_qk_prod @staticmethod def backward(ctx, grad): sparse_query, indices, dense_key = ctx.saved_tensors query_num_block = ctx.query_num_block key_num_block = dense_key.size(1) // sparse_query.size(-1) indices_T = transpose_indices(indices, query_num_block, key_num_block) grad_key = sparse_dense_mm(sparse_query.transpose(-1, -2), indices_T, grad, key_num_block) grad_query = mm_to_sparse(grad, dense_key, indices) return grad_query, None, grad_key, None @staticmethod def operator_call(sparse_query, indices, dense_key, query_num_block): return MraSparseDenseMatMul.apply(sparse_query, indices, dense_key, query_num_block) class MraReduceSum: @staticmethod def operator_call(sparse_query, indices, query_num_block, key_num_block): batch_size, num_block, block_size, _ = sparse_query.size() if 
len(sparse_query.size()) != 4: raise ValueError("sparse_query must be a 4-dimensional tensor.") if len(indices.size()) != 2: raise ValueError("indices must be a 2-dimensional tensor.") _, _, block_size, _ = sparse_query.size() batch_size, num_block = indices.size() sparse_query = sparse_query.sum(dim=2).reshape(batch_size * num_block, block_size) batch_idx = torch.arange(indices.size(0), dtype=torch.long, device=indices.device) global_idxes = ( torch.div(indices, key_num_block, rounding_mode="floor").long() + batch_idx[:, None] * query_num_block ).reshape(batch_size * num_block) temp = torch.zeros( (batch_size * query_num_block, block_size), dtype=sparse_query.dtype, device=sparse_query.device ) output = temp.index_add(0, global_idxes, sparse_query).reshape(batch_size, query_num_block, block_size) output = output.reshape(batch_size, query_num_block * block_size) return output def get_low_resolution_logit(query, key, block_size, mask=None, value=None): """ Compute low resolution approximation. """ batch_size, seq_len, head_dim = query.size() num_block_per_row = seq_len // block_size value_hat = None if mask is not None: token_count = mask.reshape(batch_size, num_block_per_row, block_size).sum(dim=-1) query_hat = query.reshape(batch_size, num_block_per_row, block_size, head_dim).sum(dim=-2) / ( token_count[:, :, None] + 1e-6 ) key_hat = key.reshape(batch_size, num_block_per_row, block_size, head_dim).sum(dim=-2) / ( token_count[:, :, None] + 1e-6 ) if value is not None: value_hat = value.reshape(batch_size, num_block_per_row, block_size, head_dim).sum(dim=-2) / ( token_count[:, :, None] + 1e-6 ) else: token_count = block_size * torch.ones(batch_size, num_block_per_row, dtype=torch.float, device=query.device) query_hat = query.reshape(batch_size, num_block_per_row, block_size, head_dim).mean(dim=-2) key_hat = key.reshape(batch_size, num_block_per_row, block_size, head_dim).mean(dim=-2) if value is not None: value_hat = value.reshape(batch_size, num_block_per_row, block_size, head_dim).mean(dim=-2) low_resolution_logit = torch.matmul(query_hat, key_hat.transpose(-1, -2)) / math.sqrt(head_dim) low_resolution_logit_row_max = low_resolution_logit.max(dim=-1, keepdims=True).values if mask is not None: low_resolution_logit = ( low_resolution_logit - 1e4 * ((token_count[:, None, :] * token_count[:, :, None]) < 0.5).float() ) return low_resolution_logit, token_count, low_resolution_logit_row_max, value_hat def get_block_idxes( low_resolution_logit, num_blocks, approx_mode, initial_prior_first_n_blocks, initial_prior_diagonal_n_blocks ): """ Compute the indices of the subset of components to be used in the approximation. 
""" batch_size, total_blocks_per_row, _ = low_resolution_logit.shape if initial_prior_diagonal_n_blocks > 0: offset = initial_prior_diagonal_n_blocks // 2 temp_mask = torch.ones(total_blocks_per_row, total_blocks_per_row, device=low_resolution_logit.device) diagonal_mask = torch.tril(torch.triu(temp_mask, diagonal=-offset), diagonal=offset) low_resolution_logit = low_resolution_logit + diagonal_mask[None, :, :] * 5e3 if initial_prior_first_n_blocks > 0: low_resolution_logit[:, :initial_prior_first_n_blocks, :] = ( low_resolution_logit[:, :initial_prior_first_n_blocks, :] + 5e3 ) low_resolution_logit[:, :, :initial_prior_first_n_blocks] = ( low_resolution_logit[:, :, :initial_prior_first_n_blocks] + 5e3 ) top_k_vals = torch.topk( low_resolution_logit.reshape(batch_size, -1), num_blocks, dim=-1, largest=True, sorted=False ) indices = top_k_vals.indices if approx_mode == "full": threshold = top_k_vals.values.min(dim=-1).values high_resolution_mask = (low_resolution_logit >= threshold[:, None, None]).float() elif approx_mode == "sparse": high_resolution_mask = None else: raise ValueError(f"{approx_mode} is not a valid approx_model value.") return indices, high_resolution_mask def mra2_attention( query, key, value, mask, num_blocks, approx_mode, block_size=32, initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0, ): """ Use Mra to approximate self-attention. """ if mra_cuda_kernel is None: return torch.zeros_like(query).requires_grad_() batch_size, num_head, seq_len, head_dim = query.size() meta_batch = batch_size * num_head if seq_len % block_size != 0: raise ValueError("sequence length must be divisible by the block_size.") num_block_per_row = seq_len // block_size query = query.reshape(meta_batch, seq_len, head_dim) key = key.reshape(meta_batch, seq_len, head_dim) value = value.reshape(meta_batch, seq_len, head_dim) if mask is not None: query = query * mask[:, :, None] key = key * mask[:, :, None] value = value * mask[:, :, None] if approx_mode == "full": low_resolution_logit, token_count, low_resolution_logit_row_max, value_hat = get_low_resolution_logit( query, key, block_size, mask, value ) elif approx_mode == "sparse": with torch.no_grad(): low_resolution_logit, token_count, low_resolution_logit_row_max, _ = get_low_resolution_logit( query, key, block_size, mask ) else: raise Exception('approx_mode must be "full" or "sparse"') with torch.no_grad(): low_resolution_logit_normalized = low_resolution_logit - low_resolution_logit_row_max indices, high_resolution_mask = get_block_idxes( low_resolution_logit_normalized, num_blocks, approx_mode, initial_prior_first_n_blocks, initial_prior_diagonal_n_blocks, ) high_resolution_logit = MraSampledDenseMatMul.operator_call( query, key, indices, block_size=block_size ) / math.sqrt(head_dim) max_vals, max_vals_scatter = sparse_max(high_resolution_logit, indices, num_block_per_row, num_block_per_row) high_resolution_logit = high_resolution_logit - max_vals_scatter if mask is not None: high_resolution_logit = high_resolution_logit - 1e4 * (1 - sparse_mask(mask, indices)[:, :, :, None]) high_resolution_attn = torch.exp(high_resolution_logit) high_resolution_attn_out = MraSparseDenseMatMul.operator_call( high_resolution_attn, indices, value, num_block_per_row ) high_resolution_normalizer = MraReduceSum.operator_call( high_resolution_attn, indices, num_block_per_row, num_block_per_row ) if approx_mode == "full": low_resolution_attn = ( torch.exp(low_resolution_logit - low_resolution_logit_row_max - 1e4 * high_resolution_mask) * token_count[:, 
None, :] ) low_resolution_attn_out = ( torch.matmul(low_resolution_attn, value_hat)[:, :, None, :] .repeat(1, 1, block_size, 1) .reshape(meta_batch, seq_len, head_dim) ) low_resolution_normalizer = ( low_resolution_attn.sum(dim=-1)[:, :, None].repeat(1, 1, block_size).reshape(meta_batch, seq_len) ) log_correction = low_resolution_logit_row_max.repeat(1, 1, block_size).reshape(meta_batch, seq_len) - max_vals if mask is not None: log_correction = log_correction * mask low_resolution_corr = torch.exp(log_correction * (log_correction <= 0).float()) low_resolution_attn_out = low_resolution_attn_out * low_resolution_corr[:, :, None] low_resolution_normalizer = low_resolution_normalizer * low_resolution_corr high_resolution_corr = torch.exp(-log_correction * (log_correction > 0).float()) high_resolution_attn_out = high_resolution_attn_out * high_resolution_corr[:, :, None] high_resolution_normalizer = high_resolution_normalizer * high_resolution_corr context_layer = (high_resolution_attn_out + low_resolution_attn_out) / ( high_resolution_normalizer[:, :, None] + low_resolution_normalizer[:, :, None] + 1e-6 ) elif approx_mode == "sparse": context_layer = high_resolution_attn_out / (high_resolution_normalizer[:, :, None] + 1e-6) else: raise Exception('config.approx_mode must be "full" or "sparse"') if mask is not None: context_layer = context_layer * mask[:, :, None] context_layer = context_layer.reshape(batch_size, num_head, seq_len, head_dim) return context_layer class MraEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings + 2, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) + 2) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), persistent=False, ) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds 
is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class MraSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) kernel_loaded = mra_cuda_kernel is not None if is_torch_cuda_available() and is_ninja_available() and not kernel_loaded: try: load_cuda_kernels() except Exception as e: logger.warning(f"Could not load the custom CUDA kernel for MRA attention: {e}") self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = ( position_embedding_type if position_embedding_type is not None else config.position_embedding_type ) self.num_block = (config.max_position_embeddings // 32) * config.block_per_row self.num_block = min(self.num_block, int((config.max_position_embeddings // 32) ** 2)) self.approx_mode = config.approx_mode self.initial_prior_first_n_blocks = config.initial_prior_first_n_blocks self.initial_prior_diagonal_n_blocks = config.initial_prior_diagonal_n_blocks def transpose_for_scores(self, layer): new_layer_shape = layer.size()[:-1] + (self.num_attention_heads, self.attention_head_size) layer = layer.view(*new_layer_shape) return layer.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask=None): mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) batch_size, num_heads, seq_len, head_dim = query_layer.size() # revert changes made by get_extended_attention_mask attention_mask = 1.0 + attention_mask / 10000.0 attention_mask = ( attention_mask.squeeze().repeat(1, num_heads, 1).reshape(batch_size * num_heads, seq_len).int() ) # The CUDA kernels are most efficient with inputs whose size is a multiple of a GPU's warp size (32). Inputs # smaller than this are padded with zeros.
gpu_warp_size = 32 if head_dim < gpu_warp_size: pad_size = batch_size, num_heads, seq_len, gpu_warp_size - head_dim query_layer = torch.cat([query_layer, torch.zeros(pad_size, device=query_layer.device)], dim=-1) key_layer = torch.cat([key_layer, torch.zeros(pad_size, device=key_layer.device)], dim=-1) value_layer = torch.cat([value_layer, torch.zeros(pad_size, device=value_layer.device)], dim=-1) context_layer = mra2_attention( query_layer.float(), key_layer.float(), value_layer.float(), attention_mask.float(), self.num_block, approx_mode=self.approx_mode, initial_prior_first_n_blocks=self.initial_prior_first_n_blocks, initial_prior_diagonal_n_blocks=self.initial_prior_diagonal_n_blocks, ) if head_dim < gpu_warp_size: context_layer = context_layer[:, :, :, :head_dim] context_layer = context_layer.reshape(batch_size, num_heads, seq_len, head_dim) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput class MraSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class MraAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = MraSelfAttention(config, position_embedding_type=position_embedding_type) self.output = MraSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, attention_mask=None): self_outputs = self.self(hidden_states, attention_mask) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate class MraIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from 
transformers.models.bert.modeling_bert.BertOutput class MraOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class MraLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = MraAttention(config) self.add_cross_attention = config.add_cross_attention self.intermediate = MraIntermediate(config) self.output = MraOutput(config) def forward(self, hidden_states, attention_mask=None): self_attention_outputs = self.attention(hidden_states, attention_mask) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class MraEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([MraLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, head_mask=None, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, ) else: layer_outputs = layer_module(hidden_states, attention_mask) hidden_states = layer_outputs[0] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) return BaseModelOutputWithCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, ) # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform class MraPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Mra class MraLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = MraPredictionHeadTransform(config) # The output weights are the 
same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Mra class MraOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = MraLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores # Copied from transformers.models.yoso.modeling_yoso.YosoPreTrainedModel with Yoso->Mra,yoso->mra class MraPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = MraConfig base_model_prefix = "mra" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) MRA_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MraConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ MRA_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare MRA Model transformer outputting raw hidden-states without any specific head on top.", MRA_START_DOCSTRING, ) class MraModel(MraPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = MraEmbeddings(config) self.encoder = MraEncoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(MRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithCrossAttentions]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = 
buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutputWithCrossAttentions( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @add_start_docstrings("""MRA Model with a `language modeling` head on top.""", MRA_START_DOCSTRING) class MraForMaskedLM(MraPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] def __init__(self, config): super().__init__(config) self.mra = MraModel(config) self.cls = MraOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(MRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mra( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) # Copied from transformers.models.yoso.modeling_yoso.YosoClassificationHead with Yoso->Mra class MraClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) self.config = config def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = ACT2FN[self.config.hidden_act](x) x = self.dropout(x) x = self.out_proj(x) return x @add_start_docstrings( """MRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.""", MRA_START_DOCSTRING, ) class MraForSequenceClassification(MraPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mra = MraModel(config) self.classifier = MraClassificationHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mra( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """MRA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.""", MRA_START_DOCSTRING, ) class MraForMultipleChoice(MraPreTrainedModel): def __init__(self, config): super().__init__(config) self.mra = MraModel(config) self.pre_classifier = nn.Linear(config.hidden_size, config.hidden_size) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MRA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, MultipleChoiceModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.mra( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_state = outputs[0] # (bs * num_choices, seq_len, dim) pooled_output = hidden_state[:, 0] # (bs * num_choices, dim) pooled_output = self.pre_classifier(pooled_output) # (bs * num_choices, dim) pooled_output = nn.ReLU()(pooled_output) # (bs * num_choices, dim) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """MRA Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.""", MRA_START_DOCSTRING, ) class MraForTokenClassification(MraPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mra = MraModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mra( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """MRA Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).""", MRA_START_DOCSTRING, ) class MraForQuestionAnswering(MraPreTrainedModel): def __init__(self, config): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.mra = MraModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mra( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
transformers/src/transformers/models/mra/modeling_mra.py/0
{ "file_path": "transformers/src/transformers/models/mra/modeling_mra.py", "repo_id": "transformers", "token_count": 26086 }
379
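Most of `modeling_mra.py` above routes through a custom CUDA kernel, but the block-level approximation it is built on can be sketched in plain PyTorch: pool each block of tokens into one vector, score block pairs at low resolution, and keep only the top-k block pairs, which is the selection `get_low_resolution_logit` and `get_block_idxes` perform. A simplified, mask-free sketch with illustrative sizes (no CUDA kernel, no diagonal or first-block priors):

```python
import math

import torch

block_size, num_blocks_kept = 32, 4
batch, seq_len, head_dim = 1, 128, 64
query = torch.randn(batch, seq_len, head_dim)
key = torch.randn(batch, seq_len, head_dim)

# Low-resolution logits: one mean-pooled vector per block of 32 tokens.
num_block = seq_len // block_size
query_hat = query.reshape(batch, num_block, block_size, head_dim).mean(dim=-2)
key_hat = key.reshape(batch, num_block, block_size, head_dim).mean(dim=-2)
low_resolution_logit = query_hat @ key_hat.transpose(-1, -2) / math.sqrt(head_dim)

# Keep the top-k (query block, key block) pairs; flat index = row * num_block + col,
# matching how the kernel path later decodes `indices`.
top_k = torch.topk(low_resolution_logit.reshape(batch, -1), num_blocks_kept, dim=-1, largest=True)
rows = torch.div(top_k.indices, num_block, rounding_mode="floor")
cols = top_k.indices % num_block
print(list(zip(rows[0].tolist(), cols[0].tolist())))  # selected block pairs
```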
# coding=utf-8 # Copyright 2024 Meta AI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Text/audio processor class for MusicGen Melody """ from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class MusicgenMelodyProcessor(ProcessorMixin): r""" Constructs a MusicGen Melody processor which wraps a Wav2Vec2 feature extractor - for raw audio waveform processing - and a T5 tokenizer into a single processor class. [`MusicgenMelodyProcessor`] offers all the functionalities of [`MusicgenMelodyFeatureExtractor`] and [`T5Tokenizer`]. See [`~MusicgenMelodyProcessor.__call__`] and [`~MusicgenMelodyProcessor.decode`] for more information. Args: feature_extractor (`MusicgenMelodyFeatureExtractor`): An instance of [`MusicgenMelodyFeatureExtractor`]. The feature extractor is a required input. tokenizer (`T5Tokenizer`): An instance of [`T5Tokenizer`]. The tokenizer is a required input. """ feature_extractor_class = "MusicgenMelodyFeatureExtractor" tokenizer_class = ("T5Tokenizer", "T5TokenizerFast") def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) # Copied from transformers.models.musicgen.processing_musicgen.MusicgenProcessor.get_decoder_prompt_ids def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True): return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps) def __call__(self, audio=None, text=None, **kwargs): """ Main method to prepare one or several sequence(s) and audio(s) for the model. This method forwards the `audio` and `kwargs` arguments to MusicgenMelodyFeatureExtractor's [`~MusicgenMelodyFeatureExtractor.__call__`] if `audio` is not `None` to pre-process the audio. It also forwards the `text` and `kwargs` arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.__call__`] if `text` is not `None`. Please refer to the docstring of the above two methods for more information. Args: audio (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`): The audio or batch of audios to be prepared. Each audio can be a NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each audio should be a mono-stereo signal of shape (T), where T is the sample length of the audio. text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). kwargs (*optional*): Remaining dictionary of keyword arguments that will be passed to the feature extractor and/or the tokenizer. Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **input_features** -- Audio input features to be fed to a model.
Returned when `audio` is not `None`.
            - **attention_mask** -- List of token indices specifying which tokens should be attended to by the model
              when `text` is not `None`. When only `audio` is specified, returns the timestamps attention mask.
        """

        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, sampling_rate=sampling_rate, **kwargs)

        if text is None:
            return audio_inputs
        elif audio is None:
            return inputs
        else:
            inputs["input_features"] = audio_inputs["input_features"]
            return inputs

    # Copied from transformers.models.musicgen.processing_musicgen.MusicgenProcessor.batch_decode with padding_mask->attention_mask
    def batch_decode(self, *args, **kwargs):
        """
        This method is used to decode either batches of audio outputs from the MusicGen model, or batches of token ids
        from the tokenizer. In the case of decoding token ids, this method forwards all its arguments to T5Tokenizer's
        [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information.
        """
        audio_values = kwargs.pop("audio", None)
        attention_mask = kwargs.pop("attention_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, attention_mask=attention_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    # Copied from transformers.models.musicgen.processing_musicgen.MusicgenProcessor.decode
    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to T5Tokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the
        docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    # Copied from transformers.models.musicgen.processing_musicgen.MusicgenProcessor._decode_audio with padding_mask->attention_mask
    def _decode_audio(self, audio_values, attention_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        """
        This method strips any padding from the audio values to return a list of numpy audio arrays.
        """
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if attention_mask is None:
            return list(audio_values)

        attention_mask = to_numpy(attention_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the
        # **non-padding** token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - attention_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        attention_mask = np.pad(attention_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                attention_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values

    def get_unconditional_inputs(self, num_samples=1, return_tensors="pt"):
        """
        Helper function to get null inputs for unconditional generation, enabling the model to be used without the
        feature extractor or tokenizer.

        Args:
            num_samples (int, *optional*):
                Number of audio samples to unconditionally generate.
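            return_tensors (`str`, *optional*, defaults to `"pt"`):
                The framework of the returned tensors (e.g. `"pt"` for PyTorch), forwarded to the tokenizer call
                below.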
Example: ```python >>> from transformers import MusicgenMelodyForConditionalGeneration, MusicgenMelodyProcessor >>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody") >>> # get the unconditional (or 'null') inputs for the model >>> processor = MusicgenMelodyProcessor.from_pretrained("facebook/musicgen-melody") >>> unconditional_inputs = processor.get_unconditional_inputs(num_samples=1) >>> audio_samples = model.generate(**unconditional_inputs, max_new_tokens=256) ```""" inputs = self.tokenizer([""] * num_samples, return_tensors=return_tensors, return_attention_mask=True) inputs["attention_mask"][:] = 0 return inputs
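

# Illustrative usage sketch for conditional inputs: it assumes a mono waveform `audio_array` (np.ndarray) and its
# integer `sampling_rate` are already loaded; the checkpoint name is the one referenced in the docstrings above.
#
#     >>> from transformers import MusicgenMelodyProcessor
#
#     >>> processor = MusicgenMelodyProcessor.from_pretrained("facebook/musicgen-melody")
#     >>> # joint text/audio conditioning returns `input_ids`, `attention_mask` and `input_features`
#     >>> inputs = processor(
#     ...     audio=audio_array, sampling_rate=sampling_rate, text=["80s pop with funky bass"], return_tensors="pt"
#     ... )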
# coding=utf-8
# Copyright 2023 NllbMoe Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch NLLB-MoE model."""

import math
from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss

from ...activations import ACT2FN
from ...integrations.deepspeed import is_deepspeed_zero3_enabled
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
from ...modeling_outputs import (
    MoEModelOutput,
    MoEModelOutputWithPastAndCrossAttentions,
    Seq2SeqMoEModelOutput,
    Seq2SeqMoEOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_end_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_nllb_moe import NllbMoeConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "NllbMoeConfig"
_CHECKPOINT_FOR_DOC = "hf-internal-testing/dummy-nllb-moe-2-experts"
_REAL_CHECKPOINT_FOR_DOC = "facebook/nllb-moe-54b"


# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """
    Shift input ids one token to the right.
    """
    shifted_input_ids = input_ids.new_zeros(input_ids.shape)
    shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
    shifted_input_ids[:, 0] = decoder_start_token_id

    if pad_token_id is None:
        raise ValueError("self.model.config.pad_token_id has to be defined.")
    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)

    return shifted_input_ids


# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    """
    Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
    are ignored. This is modified from fairseq's `utils.make_positions`.

    Args:
        input_ids (`torch.Tensor`): Input token ids.
        padding_idx (`int`): Id of the padding token.
        past_key_values_length (`int`, *optional*, defaults to 0): Length of the cached key/value states.

    Returns:
        `torch.Tensor`: The position ids.
    """
    # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
    return incremental_indices.long() + padding_idx


def load_balancing_loss_func(router_probs: torch.Tensor, expert_indices: torch.Tensor) -> float:
    r"""
    Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.

    See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
    function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
    experts is too unbalanced.
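
    Concretely, with `N = num_experts`, the value computed below is `N**2 * mean_i(f_i * P_i)`, where `f_i` is the
    fraction of tokens that selected expert `i` and `P_i` is the mean router probability assigned to expert `i`; the
    loss is minimized when routing is uniform across experts.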
    Args:
        router_probs (`torch.Tensor`):
            Probability assigned to each expert per token. Shape: [batch_size, sequence_length, num_experts].
        expert_indices (`torch.Tensor`):
            Indices tensor of shape [batch_size, sequence_length] identifying the selected expert for a given token.

    Returns:
        The auxiliary loss.
    """
    if router_probs is None:
        return 0

    num_experts = router_probs.shape[-1]

    # cast the expert indices to int64, otherwise one-hot encoding will fail
    if expert_indices.dtype != torch.int64:
        expert_indices = expert_indices.to(torch.int64)

    if len(expert_indices.shape) == 2:
        expert_indices = expert_indices.unsqueeze(2)

    expert_mask = torch.nn.functional.one_hot(expert_indices, num_experts)

    # For a given token, determine if it was routed to a given expert.
    expert_mask = torch.max(expert_mask, axis=-2).values

    # cast to float32 otherwise mean will fail
    expert_mask = expert_mask.to(torch.float32)
    tokens_per_group_and_expert = torch.mean(expert_mask, axis=-2)

    router_prob_per_group_and_expert = torch.mean(router_probs, axis=-2)
    return torch.mean(tokens_per_group_and_expert * router_prob_per_group_and_expert) * (num_experts**2)


# Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100ScaledWordEmbedding with M2M100->NllbMoe
class NllbMoeScaledWordEmbedding(nn.Embedding):
    """
    This module overrides nn.Embedding's forward by multiplying the output with the embedding scale.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float] = 1.0):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        self.embed_scale = embed_scale

    def forward(self, input_ids: torch.Tensor):
        return super().forward(input_ids) * self.embed_scale


# Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding
class NllbMoeSinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length."""

    def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
        super().__init__()
        self.offset = 2
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)

    def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
        emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
        if hasattr(self, "weights"):
            # in forward put the weights on the correct dtype and device of the param
            emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)

        self.register_buffer("weights", emb_weights, persistent=False)

    @staticmethod
    def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
        """
        Build sinusoidal embeddings.

        This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
        "Attention Is All You Need".
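
        Concretely, for position `pos` and channel `i < half_dim` (with `half_dim = embedding_dim // 2`), the code
        below computes `sin(pos * exp(-i * log(10000) / (half_dim - 1)))` for the first half of the channels and the
        matching cosine for the second half.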
""" half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb.to(torch.get_default_dtype()) @torch.no_grad() def forward( self, input_ids: torch.Tensor = None, inputs_embeds: torch.Tensor = None, past_key_values_length: int = 0 ): if input_ids is not None: bsz, seq_len = input_ids.size() # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to( input_ids.device ) else: bsz, seq_len = inputs_embeds.size()[:-1] position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length) # expand embeddings if needed max_pos = self.padding_idx + 1 + seq_len + past_key_values_length if max_pos > self.weights.size(0): self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx) return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach() def create_position_ids_from_inputs_embeds(self, inputs_embeds, past_key_values_length): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length class NllbMoeTop2Router(nn.Module): """ Router using tokens choose top-2 experts assignment. This router uses the same mechanism as in NLLB-MoE from the fairseq repository. Items are sorted by router_probs and then routed to their choice of expert until the expert's expert_capacity is reached. **There is no guarantee that each token is processed by an expert**, or that each expert receives at least one token. The router combining weights are also returned to make sure that the states that are not updated will be masked. """ def __init__(self, config: NllbMoeConfig): super().__init__() self.num_experts = config.num_experts self.expert_capacity = config.expert_capacity self.classifier = nn.Linear(config.hidden_size, self.num_experts, bias=config.router_bias) self.router_ignore_padding_tokens = config.router_ignore_padding_tokens self.dtype = getattr(torch, config.router_dtype) self.second_expert_policy = config.second_expert_policy self.normalize_router_prob_before_dropping = config.normalize_router_prob_before_dropping self.batch_prioritized_routing = config.batch_prioritized_routing self.moe_eval_capacity_token_fraction = config.moe_eval_capacity_token_fraction def _cast_classifier(self): r""" `bitsandbytes` `Linear8bitLt` layers does not support manual casting Therefore we need to check if they are an instance of the `Linear8bitLt` class by checking special attributes. 
""" if not (hasattr(self.classifier, "SCB") or hasattr(self.classifier, "CB")): self.classifier = self.classifier.to(self.dtype) def normalize_router_probabilities(self, router_probs, top_1_mask, top_2_mask): top_1_max_probs = (router_probs * top_1_mask).sum(dim=1) top_2_max_probs = (router_probs * top_2_mask).sum(dim=1) denom_s = torch.clamp(top_1_max_probs + top_2_max_probs, min=torch.finfo(router_probs.dtype).eps) top_1_max_probs = top_1_max_probs / denom_s top_2_max_probs = top_2_max_probs / denom_s return top_1_max_probs, top_2_max_probs def route_tokens( self, router_logits: torch.Tensor, input_dtype: torch.dtype = torch.float32, padding_mask: Optional[torch.LongTensor] = None, ) -> Tuple: """ Computes the `dispatch_mask` and the `dispatch_weights` for each experts. The masks are adapted to the expert capacity. """ nb_tokens = router_logits.shape[0] # Apply Softmax and cast back to the original `dtype` router_probs = nn.functional.softmax(router_logits, dim=-1, dtype=self.dtype).to(input_dtype) top_1_expert_index = torch.argmax(router_probs, dim=-1) top_1_mask = torch.nn.functional.one_hot(top_1_expert_index, num_classes=self.num_experts) if self.second_expert_policy == "sampling": gumbel = torch.distributions.gumbel.Gumbel(0, 1).rsample router_logits += gumbel(router_logits.shape).to(router_logits.device) # replace top_1_expert_index with min values logits_except_top_1 = router_logits.masked_fill(top_1_mask.bool(), float("-inf")) top_2_expert_index = torch.argmax(logits_except_top_1, dim=-1) top_2_mask = torch.nn.functional.one_hot(top_2_expert_index, num_classes=self.num_experts) if self.normalize_router_prob_before_dropping: top_1_max_probs, top_2_max_probs = self.normalize_router_probabilities( router_probs, top_1_mask, top_2_mask ) if self.second_expert_policy == "random": top_2_max_probs = (router_probs * top_2_mask).sum(dim=1) sampled = (2 * top_2_max_probs) > torch.rand_like(top_2_max_probs.float()) top_2_mask = top_2_mask * sampled.repeat(self.num_experts, 1).transpose(1, 0) if padding_mask is not None and not self.router_ignore_padding_tokens: if len(padding_mask.shape) == 4: # only get the last causal mask padding_mask = padding_mask[:, :, -1, :].reshape(-1)[-nb_tokens:] non_padding = ~padding_mask.bool() top_1_mask = top_1_mask * non_padding.unsqueeze(-1).to(top_1_mask.dtype) top_2_mask = top_2_mask * non_padding.unsqueeze(-1).to(top_1_mask.dtype) if self.batch_prioritized_routing: # sort tokens based on their routing probability # to make sure important tokens are routed, first importance_scores = -1 * router_probs.max(dim=1)[0] sorted_top_1_mask = top_1_mask[importance_scores.argsort(dim=0)] sorted_cumsum1 = (torch.cumsum(sorted_top_1_mask, dim=0) - 1) * sorted_top_1_mask locations1 = sorted_cumsum1[importance_scores.argsort(dim=0).argsort(dim=0)] sorted_top_2_mask = top_2_mask[importance_scores.argsort(dim=0)] sorted_cumsum2 = (torch.cumsum(sorted_top_2_mask, dim=0) - 1) * sorted_top_2_mask locations2 = sorted_cumsum2[importance_scores.argsort(dim=0).argsort(dim=0)] # Update 2nd's location by accounting for locations of 1st locations2 += torch.sum(top_1_mask, dim=0, keepdim=True) else: locations1 = torch.cumsum(top_1_mask, dim=0) - 1 locations2 = torch.cumsum(top_2_mask, dim=0) - 1 # Update 2nd's location by accounting for locations of 1st locations2 += torch.sum(top_1_mask, dim=0, keepdim=True) if not self.training and self.moe_eval_capacity_token_fraction > 0: self.expert_capacity = math.ceil(self.moe_eval_capacity_token_fraction * nb_tokens) else: capacity = 2 * 
math.ceil(nb_tokens / self.num_experts)
            self.expert_capacity = capacity if self.expert_capacity is None else self.expert_capacity

        # Drop assignments that exceed the expert capacity (positions with cumsum >= capacity are not routed)
        top_1_mask = top_1_mask * torch.lt(locations1, self.expert_capacity)
        top_2_mask = top_2_mask * torch.lt(locations2, self.expert_capacity)

        if not self.normalize_router_prob_before_dropping:
            top_1_max_probs, top_2_max_probs = self.normalize_router_probabilities(
                router_probs, top_1_mask, top_2_mask
            )

        # Calculate combine_weights and dispatch_mask
        gates1 = top_1_max_probs[:, None] * top_1_mask
        gates2 = top_2_max_probs[:, None] * top_2_mask
        router_probs = gates1 + gates2

        return top_1_mask, router_probs

    def forward(self, hidden_states: torch.Tensor, padding_mask: Optional[torch.LongTensor] = None) -> Tuple:
        r"""
        The hidden states are reshaped to simplify the computation of the router probabilities (combining weights for
        each expert).

        Args:
            hidden_states (`torch.Tensor`):
                (batch_size, sequence_length, hidden_dim) from which router probabilities are computed.

        Returns:
            top_1_mask (`torch.Tensor` of shape (batch_size * sequence_length, num_experts)):
                One-hot tensor indicating, for each token, the expert selected with the top-1 probability of the
                router.
            router_probs (`torch.Tensor` of shape (batch_size * sequence_length, num_experts)):
                Combining weights for each token and expert, i.e. the (capacity-masked) router probabilities used to
                weight the expert outputs.
        """
        self.input_dtype = hidden_states.dtype
        batch_size, sequence_length, hidden_dim = hidden_states.shape
        hidden_states = hidden_states.reshape((batch_size * sequence_length), hidden_dim)
        hidden_states = hidden_states.to(self.dtype)
        self._cast_classifier()
        router_logits = self.classifier(hidden_states)
        top_1_mask, router_probs = self.route_tokens(router_logits, self.input_dtype, padding_mask)
        return top_1_mask, router_probs


class NllbMoeDenseActDense(nn.Module):
    def __init__(self, config: NllbMoeConfig, ffn_dim: int):
        super().__init__()
        self.fc1 = nn.Linear(config.d_model, ffn_dim)
        self.fc2 = nn.Linear(ffn_dim, config.d_model)
        self.dropout = nn.Dropout(config.activation_dropout)
        self.act = ACT2FN[config.activation_function]

    def forward(self, hidden_states):
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.dropout(hidden_states)
        if (
            isinstance(self.fc2.weight, torch.Tensor)
            and hidden_states.dtype != self.fc2.weight.dtype
            and (self.fc2.weight.dtype != torch.int8 and self.fc2.weight.dtype != torch.uint8)
        ):
            hidden_states = hidden_states.to(self.fc2.weight.dtype)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class NllbMoeSparseMLP(nn.Module):
    r"""
    Implementation of the NLLB-MoE sparse MLP module.
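
    Each token is dispatched to at most two experts by a [`NllbMoeTop2Router`]; the expert outputs are combined with
    the capacity-masked router probabilities, so tokens dropped by the capacity constraint receive a zero update from
    this module and are carried by the surrounding residual connection only.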
""" def __init__(self, config: NllbMoeConfig, ffn_dim: int, expert_class: nn.Module = NllbMoeDenseActDense): super().__init__() self.router = NllbMoeTop2Router(config) self.moe_token_dropout = config.moe_token_dropout self.token_dropout = nn.Dropout(self.moe_token_dropout) self.num_experts = config.num_experts self.experts = nn.ModuleDict() for idx in range(self.num_experts): self.experts[f"expert_{idx}"] = expert_class(config, ffn_dim) def forward(self, hidden_states: torch.Tensor, padding_mask: Optional[torch.Tensor] = False): r""" The goal of this forward pass is to have the same number of operation as the equivalent `NllbMoeDenseActDense` (mlp) layer. This means that all of the hidden states should be processed at most twice ( since we are using a top_2 gating mecanism). This means that we keep the complexity to O(batch_size x sequence_length x hidden_dim) instead of O(num_experts x batch_size x sequence_length x hidden_dim). 1- Get the `router_probs` from the `router`. The shape of the `router_mask` is `(batch_size X sequence_length, num_expert)` and corresponds to the boolean version of the `router_probs`. The inputs are masked using the `router_mask`. 2- Dispatch the hidden_states to its associated experts. The router probabilities are used to weight the contribution of each experts when updating the masked hidden states. Args: hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_dim)`): The hidden states padding_mask (`torch.Tensor`, *optional*, defaults to `False`): Attention mask. Can be in the causal form or not. Returns: hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_dim)`): Updated hidden states router_logits (`torch.Tensor` of shape `(batch_size, sequence_length, num_experts)`): Needed for computing the loss """ batch_size, sequence_length, hidden_dim = hidden_states.shape top_1_mask, router_probs = self.router(hidden_states, padding_mask) router_mask = router_probs.bool() hidden_states = hidden_states.reshape((batch_size * sequence_length), hidden_dim) masked_hidden_states = torch.einsum("bm,be->ebm", hidden_states, router_mask) for idx, expert in enumerate(self.experts.values()): token_indices = router_mask[:, idx] combining_weights = router_probs[token_indices, idx] expert_output = expert(masked_hidden_states[idx, token_indices]) if self.moe_token_dropout > 0: if self.training: expert_output = self.token_dropout(expert_output) else: expert_output *= 1 - self.moe_token_dropout masked_hidden_states[idx, token_indices] = torch.einsum("b,be->be", combining_weights, expert_output) hidden_states = masked_hidden_states.sum(dim=0).reshape(batch_size, sequence_length, hidden_dim) top_1_expert_index = torch.argmax(top_1_mask, dim=-1) return hidden_states, (router_probs, top_1_expert_index) # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->NllbMoe,key_value_states->encoder_hidden_states class NllbMoeAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[NllbMoeConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if encoder_hidden_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = encoder_hidden_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == encoder_hidden_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `encoder_hidden_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == encoder_hidden_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(encoder_hidden_states), -1, bsz) value_states = self._shape(self.v_proj(encoder_hidden_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. 
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value class NllbMoeEncoderLayer(nn.Module): def __init__(self, config: NllbMoeConfig, is_sparse: bool = False): super().__init__() self.embed_dim = config.d_model self.is_sparse = is_sparse self.self_attn = NllbMoeAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, ) self.attn_dropout = nn.Dropout(config.dropout) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) if not self.is_sparse: self.ffn = NllbMoeDenseActDense(config, ffn_dim=config.encoder_ffn_dim) else: self.ffn = NllbMoeSparseMLP(config, ffn_dim=config.encoder_ffn_dim) self.ff_layer_norm = nn.LayerNorm(config.d_model) self.ff_dropout = nn.Dropout(config.activation_dropout) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool = False, output_router_logits: bool = False, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = self.attn_dropout(hidden_states) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.ff_layer_norm(hidden_states) if self.is_sparse: hidden_states, router_states = self.ffn(hidden_states, attention_mask) else: # router_states set to None to track which layers have None gradients. 
hidden_states, router_states = self.ffn(hidden_states), None hidden_states = self.ff_dropout(hidden_states) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16 and ( torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() ): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) if output_router_logits: outputs += (router_states,) return outputs class NllbMoeDecoderLayer(nn.Module): def __init__(self, config: NllbMoeConfig, is_sparse: bool = False): super().__init__() self.embed_dim = config.d_model self.is_sparse = is_sparse self.self_attn = NllbMoeAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.attn_dropout = nn.Dropout(config.dropout) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.cross_attention = NllbMoeAttention( self.embed_dim, config.decoder_attention_heads, config.attention_dropout, is_decoder=True ) self.cross_attention_layer_norm = nn.LayerNorm(self.embed_dim) if not self.is_sparse: self.ffn = NllbMoeDenseActDense(config, ffn_dim=config.decoder_ffn_dim) else: self.ffn = NllbMoeSparseMLP(config, ffn_dim=config.decoder_ffn_dim) self.ff_layer_norm = nn.LayerNorm(config.d_model) self.ff_dropout = nn.Dropout(config.activation_dropout) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, cross_attn_layer_head_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, output_router_logits: Optional[bool] = False, use_cache: Optional[bool] = True, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
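            output_router_logits (`bool`, *optional*):
                Whether or not to return the router states of the sparse feed-forward layer, which are used to compute
                the auxiliary router losses.
            use_cache (`bool`, *optional*, defaults to `True`):
                Whether or not to return the `present_key_value` states to speed up sequential decoding.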
""" residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = self.attn_dropout(hidden_states) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.cross_attention_layer_norm(hidden_states) # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.cross_attention( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, past_key_value=cross_attn_past_key_value, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, output_attentions=output_attentions, ) hidden_states = self.attn_dropout(hidden_states) hidden_states = residual + hidden_states # add cross-attn to positions 3,4 of present_key_value tuple present_key_value += cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.ff_layer_norm(hidden_states) if self.is_sparse: hidden_states, router_states = self.ffn(hidden_states, attention_mask) else: hidden_states, router_states = self.ffn(hidden_states), None hidden_states = self.ff_dropout(hidden_states) hidden_states = residual + hidden_states # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states, present_key_value) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if output_router_logits: outputs += (router_states,) return outputs class NllbMoePreTrainedModel(PreTrainedModel): config_class = NllbMoeConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["NllbMoeEncoderLayer", "NllbMoeDecoderLayer"] def _init_weights(self, module): """Initialize the weights""" std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() NLLB_MOE_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. 
    Parameters:
        config ([`NllbMoeConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

NLLB_MOE_GENERATION_EXAMPLE = r"""
    Translation example:

    ```python
    >>> from transformers import AutoTokenizer, NllbMoeForConditionalGeneration

    >>> model = NllbMoeForConditionalGeneration.from_pretrained("facebook/nllb-moe-54b")
    >>> tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-moe-54b")

    >>> text_to_translate = "Life is like a box of chocolates"
    >>> model_inputs = tokenizer(text_to_translate, return_tensors="pt")

    >>> # translate to French
    >>> gen_tokens = model.generate(**model_inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"))
    >>> print(tokenizer.batch_decode(gen_tokens, skip_special_tokens=True))
    ```
"""

NLLB_MOE_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            NllbMoe uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
            `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. output_router_logits (`bool`, *optional*): Whether or not to return the logits of all the routers. They are useful for computing the router loss, and should not be returned during inference. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class NllbMoeEncoder(NllbMoePreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`NllbMoeEncoderLayer`]. 
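    Sparse (Mixture-of-Experts) feed-forward layers are interleaved every `config.encoder_sparse_step` layers; the
    remaining layers use the dense `NllbMoeDenseActDense` block.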
Args: config: NllbMoeConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: NllbMoeConfig, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 self.embed_tokens = NllbMoeScaledWordEmbedding( config.vocab_size, embed_dim, self.padding_idx, embed_scale=embed_scale ) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.embed_positions = NllbMoeSinusoidalPositionalEmbedding( config.max_position_embeddings, embed_dim, self.padding_idx, ) sparse_step = config.encoder_sparse_step self.layers = nn.ModuleList() for i in range(config.encoder_layers): is_sparse = (i + 1) % sparse_step == 0 if sparse_step > 0 else False self.layers.append(NllbMoeEncoderLayer(config, is_sparse)) self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_router_logits: Optional[bool] = None, return_dict: Optional[bool] = None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. output_router_logits (`bool`, *optional*): Whether or not to return the logits of all the routers. They are useful for computing the router loss, and should not be returned during inference. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) embed_pos = self.embed_positions(input_ids, inputs_embeds) embed_pos = embed_pos.to(inputs_embeds.device) hidden_states = inputs_embeds + embed_pos hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_router_probs = () if output_router_logits else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = torch.rand([]) if self.training and (dropout_probability < self.layerdrop): # skip the layer layer_outputs = (None, None, None) else: if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, attention_mask, (head_mask[idx] if head_mask is not None else None), output_attentions, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, output_router_logits=output_router_logits, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if output_router_logits: all_router_probs += (layer_outputs[-1],) last_hidden_state = self.layer_norm(hidden_states) if output_hidden_states: encoder_states += (last_hidden_state,) if not return_dict: return tuple( v for v in [last_hidden_state, encoder_states, all_attentions, all_router_probs] if v is not None ) return MoEModelOutput( last_hidden_state=last_hidden_state, hidden_states=encoder_states, attentions=all_attentions, router_probs=all_router_probs, ) class NllbMoeDecoder(NllbMoePreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a [`NllbMoeDecoderLayer`] Args: config: NllbMoeConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: NllbMoeConfig, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = NllbMoeScaledWordEmbedding( config.vocab_size, config.d_model, self.padding_idx, embed_scale=embed_scale ) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.embed_positions = NllbMoeSinusoidalPositionalEmbedding( config.max_position_embeddings, config.d_model, self.padding_idx, ) sparse_step = config.decoder_sparse_step self.layers = nn.ModuleList() for i in range(config.decoder_layers): is_sparse = (i + 1) % sparse_step == 0 if sparse_step > 0 else False self.layers.append(NllbMoeDecoderLayer(config, is_sparse)) self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_router_logits: Optional[bool] = None, return_dict: Optional[bool] = None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. output_router_logits (`bool`, *optional*): Whether or not to return the logits of all the routers. They are useful for computing the router loss, and should not be returned during inference. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) # embed positions positions = self.embed_positions(input_ids, inputs_embeds, past_key_values_length) positions = positions.to(inputs_embeds.device) hidden_states = inputs_embeds + positions hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting" " `use_cache=False`..." ) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_router_probs = () if output_router_logits else None all_cross_attentions = () if output_attentions else None present_key_value_states = () if use_cache else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." 
) deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled() for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = True if self.training and (dropout_probability < self.layerdrop) else False if not skip_the_layer or deepspeed_zero3_is_enabled: layer_head_mask = head_mask[idx] if head_mask is not None else None cross_attn_layer_head_mask = cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None past_key_value = past_key_values[idx] if past_key_values is not None else None # under deepspeed zero3 all gpus must run in sync if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False layer_outputs = self._gradient_checkpointing_func( decoder_layer.forward, hidden_states, combined_attention_mask, encoder_hidden_states, encoder_attention_mask, layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value is always None with gradient checkpointing use_cache, output_attentions, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=combined_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, output_router_logits=output_router_logits, ) hidden_states = layer_outputs[0] if skip_the_layer: continue if use_cache: present_key_value_states += (layer_outputs[1],) if output_attentions: all_self_attns += (layer_outputs[2],) all_cross_attentions += (layer_outputs[3],) if output_router_logits: all_router_probs += (layer_outputs[-1],) hidden_states = self.layer_norm(hidden_states) # Add last layer if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, present_key_value_states, all_hidden_states, all_self_attns, all_cross_attentions, all_router_probs, ] if v is not None ) return MoEModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, router_probs=all_router_probs, ) @add_start_docstrings( "The bare NllbMoe Model outputting raw hidden-states without any specific head on top.", NLLB_MOE_START_DOCSTRING, ) class NllbMoeModel(NllbMoePreTrainedModel): _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] def __init__(self, config: NllbMoeConfig): super().__init__(config) padding_idx, vocab_size = config.pad_token_id, config.vocab_size embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.shared = NllbMoeScaledWordEmbedding(vocab_size, config.d_model, padding_idx, embed_scale=embed_scale) self.encoder = NllbMoeEncoder(config, self.shared) self.decoder = NllbMoeDecoder(config, self.shared) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, 
self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(NLLB_MOE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqMoEModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_router_logits: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], Seq2SeqMoEModelOutput]: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, NllbMoeModel >>> tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/random-nllb-moe-2-experts") >>> model = NllbMoeModel.from_pretrained("hf-internal-testing/random-nllb-moe-2-experts") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for NllbMoeModel >>> decoder_input_ids = model._shift_right(decoder_input_ids) >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a MoEModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, MoEModelOutput): encoder_outputs = MoEModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, router_probs=encoder_outputs[3] if len(encoder_outputs) > 3 else None, ) # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, ) if not
return_dict: return decoder_outputs + encoder_outputs return Seq2SeqMoEModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, decoder_router_logits=decoder_outputs.router_probs, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, encoder_router_logits=encoder_outputs.router_probs, ) @add_start_docstrings( "The NllbMoe Model with a language modeling head. Can be used for machine translation.", NLLB_MOE_START_DOCSTRING ) class NllbMoeForConditionalGeneration(NllbMoePreTrainedModel): base_model_prefix = "model" _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] def __init__(self, config: NllbMoeConfig): super().__init__(config) self.model = NllbMoeModel(config) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) self.router_z_loss_coef = config.router_z_loss_coef self.router_aux_loss_coef = config.router_aux_loss_coef # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(NLLB_MOE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqMoEOutput, config_class=_CONFIG_FOR_DOC) @add_end_docstrings(NLLB_MOE_GENERATION_EXAMPLE) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_router_logits: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], Seq2SeqMoEOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_router_logits = ( output_router_logits if output_router_logits is not None else self.config.output_router_logits ) if labels is not None: if decoder_input_ids is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, ) lm_logits = self.lm_head(outputs[0]) loss = None encoder_aux_loss = None decoder_aux_loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) # TODO: check in the config whether the router loss is enabled if output_router_logits: encoder_router_logits = outputs[-1] decoder_router_logits = outputs[3 if output_attentions else 4] # Compute the auxiliary (load balancing) loss for each router in the encoder and decoder. # Note: `router_z_loss_coef` is read from the config, but the z-loss itself is not applied here. encoder_router_logits, encoder_expert_indexes = self._unpack_router_logits(encoder_router_logits) encoder_aux_loss = load_balancing_loss_func(encoder_router_logits, encoder_expert_indexes) decoder_router_logits, decoder_expert_indexes = self._unpack_router_logits(decoder_router_logits) decoder_aux_loss = load_balancing_loss_func(decoder_router_logits, decoder_expert_indexes) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) if output_router_logits and labels is not None: aux_loss = self.router_aux_loss_coef * (encoder_aux_loss + decoder_aux_loss) loss = loss + aux_loss output = (loss,) if loss is not None else () if not return_dict: output += (lm_logits,) if output_router_logits: # append the aux losses (they may be None if no labels were provided) output += ( encoder_aux_loss, decoder_aux_loss, *outputs[1:], ) else: output += outputs[1:] return output return Seq2SeqMoEOutput( loss=loss, logits=lm_logits, past_key_values=outputs.past_key_values, cross_attentions=outputs.cross_attentions, encoder_aux_loss=encoder_aux_loss, decoder_aux_loss=decoder_aux_loss, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, decoder_hidden_states=outputs.decoder_hidden_states, encoder_attentions=outputs.encoder_attentions, decoder_attentions=outputs.decoder_attentions, encoder_router_logits=outputs.encoder_router_logits, decoder_router_logits=outputs.decoder_router_logits, ) def _unpack_router_logits(self, router_outputs): total_router_logits = [] total_expert_indexes = [] for router_output in router_outputs: if router_output is not None: router_logits, expert_indexes = router_output total_router_logits.append(router_logits) total_expert_indexes.append(expert_indexes) total_router_logits = torch.cat(total_router_logits, dim=1) if len(total_router_logits) > 0 else None total_expert_indexes = torch.stack(total_expert_indexes, dim=1) if len(total_expert_indexes) > 0 else None return total_router_logits, total_expert_indexes # Copied from
transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration.prepare_inputs_for_generation def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past is used if past_key_values is not None: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if decoder_input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = decoder_input_ids.shape[1] - 1 decoder_input_ids = decoder_input_ids[:, remove_prefix_length:] return { "input_ids": None, # encoder_outputs is defined. input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, # change this to avoid caching (presumably for debugging) } @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past
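# A minimal usage sketch for the conditional-generation class above, assuming the public
# "facebook/nllb-moe-54b" checkpoint and its NLLB tokenizer (where language codes such as
# "eng_Latn" and "fra_Latn" are vocabulary tokens). `forced_bos_token_id` selects the target
# language; passing `labels` together with `output_router_logits=True` in a forward pass adds
# the load-balancing auxiliary loss computed in `forward` above.
#
#     from transformers import AutoTokenizer, NllbMoeForConditionalGeneration
#
#     tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-moe-54b", src_lang="eng_Latn")
#     model = NllbMoeForConditionalGeneration.from_pretrained("facebook/nllb-moe-54b")
#     inputs = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
#     generated = model.generate(
#         **inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn")
#     )
#     print(tokenizer.batch_decode(generated, skip_special_tokens=True))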
transformers/src/transformers/models/nllb_moe/modeling_nllb_moe.py/0
{ "file_path": "transformers/src/transformers/models/nllb_moe/modeling_nllb_moe.py", "repo_id": "transformers", "token_count": 37846 }
381
# coding=utf-8 # Copyright 2022 SHI Labs and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert OneFormer checkpoints from the original repository. URL: https://github.com/SHI-Labs/OneFormer""" import os import sys from argparse import ArgumentParser from dataclasses import dataclass from pathlib import Path from pprint import pformat from typing import Any, Dict, Iterator, List, Set, Tuple import requests import torch import torchvision.transforms as T from PIL import Image from torch import Tensor, nn try: from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import MetadataCatalog from detectron2.projects.deeplab import add_deeplab_config except ImportError: pass from transformers import CLIPTokenizer, DinatConfig, SwinConfig from transformers.models.oneformer.image_processing_oneformer import OneFormerImageProcessor from transformers.models.oneformer.modeling_oneformer import ( OneFormerConfig, OneFormerForUniversalSegmentation, OneFormerForUniversalSegmentationOutput, OneFormerModel, OneFormerModelOutput, ) from transformers.models.oneformer.processing_oneformer import OneFormerProcessor from transformers.utils import logging StateDict = Dict[str, Tensor] logging.set_verbosity_info() logger = logging.get_logger() torch.manual_seed(0) class TrackedStateDict: def __init__(self, to_track: Dict): """This class "tracks" a python dictionary by keeping track of which item is accessed. Args: to_track (Dict): The dictionary we wish to track """ self.to_track = to_track self._seen: Set[str] = set() def __getitem__(self, key: str) -> Any: return self.to_track[key] def __setitem__(self, key: str, item: Any): self._seen.add(key) self.to_track[key] = item def diff(self) -> List[str]: """This method returns a set difference between the keys in the tracked state dict and the one we have access so far. 
This is an effective way to check whether we have updated all the keys. Returns: List[str]: List of keys not yet updated """ return list(set(self.to_track.keys()) - self._seen) def copy(self) -> Dict: # proxy the call to the internal dictionary return self.to_track.copy() # Image to verify the result def prepare_img(): url = "https://praeclarumjj3.github.io/files/coco.jpeg" img_data = requests.get(url, stream=True).raw im = Image.open(img_data) return im @dataclass class Args: """Fake command line arguments needed by oneformer/detectron2 implementation""" config_file: str def setup_cfg(args: Args): # load config from file and command-line arguments cfg = get_cfg() add_deeplab_config(cfg) add_common_config(cfg) add_oneformer_config(cfg) add_swin_config(cfg) add_dinat_config(cfg) cfg.merge_from_file(args.config_file) cfg.freeze() return cfg class OriginalOneFormerConfigToOursConverter: def __call__(self, original_config: object, is_swin: bool) -> OneFormerConfig: model = original_config.MODEL dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST_PANOPTIC[0]) id2label = dict(enumerate(dataset_catalog.stuff_classes)) label2id = {label: idx for idx, label in id2label.items()} if is_swin: if model.SWIN.EMBED_DIM == 96: backbone_config = SwinConfig.from_pretrained( "microsoft/swin-tiny-patch4-window7-224", drop_path_rate=model.SWIN.DROP_PATH_RATE, out_features=["stage1", "stage2", "stage3", "stage4"], ) elif model.SWIN.EMBED_DIM == 192: backbone_config = SwinConfig.from_pretrained( "microsoft/swin-large-patch4-window12-384", drop_path_rate=model.SWIN.DROP_PATH_RATE, out_features=["stage1", "stage2", "stage3", "stage4"], ) else: raise ValueError(f"embed dim {model.SWIN.EMBED_DIM} not supported for Swin!") else: backbone_config = DinatConfig.from_pretrained( "shi-labs/dinat-large-11x11-in22k-in1k-384", dilations=model.DiNAT.DILATIONS, kernel_size=model.DiNAT.KERNEL_SIZE, out_features=["stage1", "stage2", "stage3", "stage4"], ) config: OneFormerConfig = OneFormerConfig( backbone_config=backbone_config, output_attentions=True, output_hidden_states=True, return_dict=True, ignore_value=model.SEM_SEG_HEAD.IGNORE_VALUE, num_classes=model.SEM_SEG_HEAD.NUM_CLASSES, num_queries=model.ONE_FORMER.NUM_OBJECT_QUERIES, no_object_weight=model.ONE_FORMER.NO_OBJECT_WEIGHT, class_weight=model.ONE_FORMER.CLASS_WEIGHT, mask_weight=model.ONE_FORMER.MASK_WEIGHT, dice_weight=model.ONE_FORMER.DICE_WEIGHT, contrastive_weight=model.ONE_FORMER.CONTRASTIVE_WEIGHT, contrastive_temperature=model.ONE_FORMER.CONTRASTIVE_TEMPERATURE, train_num_points=model.ONE_FORMER.TRAIN_NUM_POINTS, oversample_ratio=model.ONE_FORMER.OVERSAMPLE_RATIO, importance_sample_ratio=model.ONE_FORMER.IMPORTANCE_SAMPLE_RATIO, init_std=0.02, init_xavier_std=1.0, layer_norm_eps=1e-05, is_training=False, use_auxiliary_loss=model.ONE_FORMER.DEEP_SUPERVISION, output_auxiliary_logits=True, strides=[4, 8, 16, 32], task_seq_len=original_config.INPUT.TASK_SEQ_LEN, max_seq_len=original_config.INPUT.MAX_SEQ_LEN, text_encoder_width=model.TEXT_ENCODER.WIDTH, text_encoder_context_length=model.TEXT_ENCODER.CONTEXT_LENGTH, text_encoder_num_layers=model.TEXT_ENCODER.NUM_LAYERS, text_encoder_vocab_size=model.TEXT_ENCODER.VOCAB_SIZE, text_encoder_proj_layers=model.TEXT_ENCODER.PROJ_NUM_LAYERS, text_encoder_n_ctx=model.TEXT_ENCODER.N_CTX, conv_dim=model.SEM_SEG_HEAD.CONVS_DIM, mask_dim=model.SEM_SEG_HEAD.MASK_DIM, hidden_dim=model.ONE_FORMER.HIDDEN_DIM, norm=model.SEM_SEG_HEAD.NORM, encoder_layers=model.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS, encoder_feedforward_dim=1024,
decoder_layers=model.ONE_FORMER.DEC_LAYERS, use_task_norm=model.ONE_FORMER.USE_TASK_NORM, num_attention_heads=model.ONE_FORMER.NHEADS, dropout=model.ONE_FORMER.DROPOUT, dim_feedforward=model.ONE_FORMER.DIM_FEEDFORWARD, pre_norm=model.ONE_FORMER.PRE_NORM, enforce_input_proj=model.ONE_FORMER.ENFORCE_INPUT_PROJ, query_dec_layers=model.ONE_FORMER.CLASS_DEC_LAYERS, common_stride=model.SEM_SEG_HEAD.COMMON_STRIDE, id2label=id2label, label2id=label2id, ) return config class OriginalOneFormerConfigToProcessorConverter: def __call__(self, original_config: object, model_repo: str) -> OneFormerProcessor: model = original_config.MODEL model_input = original_config.INPUT dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST_PANOPTIC[0]) if "ade20k" in model_repo: class_info_file = "ade20k_panoptic.json" elif "coco" in model_repo: class_info_file = "coco_panoptic.json" elif "cityscapes" in model_repo: class_info_file = "cityscapes_panoptic.json" else: raise ValueError("Invalid Dataset!") image_processor = OneFormerImageProcessor( image_mean=(torch.tensor(model.PIXEL_MEAN) / 255).tolist(), image_std=(torch.tensor(model.PIXEL_STD) / 255).tolist(), size=model_input.MIN_SIZE_TEST, max_size=model_input.MAX_SIZE_TEST, num_labels=model.SEM_SEG_HEAD.NUM_CLASSES, ignore_index=dataset_catalog.ignore_label, class_info_file=class_info_file, ) tokenizer = CLIPTokenizer.from_pretrained(model_repo) return OneFormerProcessor( image_processor=image_processor, tokenizer=tokenizer, task_seq_length=original_config.INPUT.TASK_SEQ_LEN, max_seq_length=original_config.INPUT.MAX_SEQ_LEN, ) class OriginalOneFormerCheckpointToOursConverter: def __init__(self, original_model: nn.Module, config: OneFormerConfig): self.original_model = original_model self.config = config def pop_all(self, renamed_keys: List[Tuple[str, str]], dst_state_dict: StateDict, src_state_dict: StateDict): for src_key, dst_key in renamed_keys: dst_state_dict[dst_key] = src_state_dict.pop(src_key) # Swin Backbone def replace_swin_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: OneFormerConfig): dst_prefix: str = "pixel_level_module.encoder" src_prefix: str = "backbone" renamed_keys = [ ( f"{src_prefix}.patch_embed.proj.weight", f"{dst_prefix}.embeddings.patch_embeddings.projection.weight", ), (f"{src_prefix}.patch_embed.proj.bias", f"{dst_prefix}.embeddings.patch_embeddings.projection.bias"), (f"{src_prefix}.patch_embed.norm.weight", f"{dst_prefix}.embeddings.norm.weight"), (f"{src_prefix}.patch_embed.norm.bias", f"{dst_prefix}.embeddings.norm.bias"), ] num_layers = len(config.backbone_config.depths) for layer_idx in range(num_layers): for block_idx in range(config.backbone_config.depths[layer_idx]): renamed_keys.extend( [ # src, dst ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.weight", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.bias", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.bias", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_bias_table", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_bias_table", ), ] ) # now we need to handle the attentions # read in weights + bias of input projection layer of cross-attention src_att_weight = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight"] src_att_bias = 
src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias"] size = src_att_weight.shape[0] offset = size // 3 dst_state_dict[ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.weight" ] = src_att_weight[:offset, :] dst_state_dict[ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.bias" ] = src_att_bias[:offset] dst_state_dict[ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.weight" ] = src_att_weight[offset : offset * 2, :] dst_state_dict[ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.bias" ] = src_att_bias[offset : offset * 2] dst_state_dict[ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.weight" ] = src_att_weight[-offset:, :] dst_state_dict[ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.bias" ] = src_att_bias[-offset:] # let's pop them src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight") src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias") # proj renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.weight", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.bias", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.bias", ), ] ) # second norm renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.weight", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.bias", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.bias", ), ] ) # mlp renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.weight", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.bias", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.bias", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.weight", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.bias", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.bias", ), ] ) renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_index", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_index", ) ] ) if layer_idx < num_layers - 1: # patch merging renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.downsample.reduction.weight", f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.reduction.weight", ), ( f"{src_prefix}.layers.{layer_idx}.downsample.norm.weight", f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.norm.weight", ), ( f"{src_prefix}.layers.{layer_idx}.downsample.norm.bias", f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.norm.bias", ), ] ) # hidden states norms renamed_keys.extend( [ ( f"{src_prefix}.norm{layer_idx}.weight", f"{dst_prefix}.hidden_states_norms.stage{layer_idx+1}.weight", ), ( f"{src_prefix}.norm{layer_idx}.bias", f"{dst_prefix}.hidden_states_norms.stage{layer_idx+1}.bias", ), ] ) self.pop_all(renamed_keys, dst_state_dict, 
src_state_dict) # Dinat Backbone def replace_dinat_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: OneFormerConfig): dst_prefix: str = "pixel_level_module.encoder" src_prefix: str = "backbone" def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str): return [ (f"{src_prefix}.weight", f"{dst_prefix}.weight"), (f"{src_prefix}.bias", f"{dst_prefix}.bias"), ] renamed_keys = rename_keys_for_weight_bias(f"{src_prefix}.patch_embed.norm", f"{dst_prefix}.embeddings.norm") for i in range(2): renamed_keys.extend( rename_keys_for_weight_bias( f"{src_prefix}.patch_embed.proj.{i}", f"{dst_prefix}.embeddings.patch_embeddings.projection.{i}", ) ) num_layers = len(config.backbone_config.depths) for layer_idx in range(num_layers): for block_idx in range(config.backbone_config.depths[layer_idx]): renamed_keys.extend( rename_keys_for_weight_bias( f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.norm1", f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.layernorm_before", ) ) renamed_keys.extend( rename_keys_for_weight_bias( f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.norm2", f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.layernorm_after", ) ) renamed_keys.extend( [ # src, dst ( f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.rpb", f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.rpb", ), ] ) # now we need to handle the attentions # read in weights + bias of input projection layer of cross-attention src_att_weight = src_state_dict[f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.weight"] src_att_bias = src_state_dict[f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.bias"] size = src_att_weight.shape[0] offset = size // 3 dst_state_dict[ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.query.weight" ] = src_att_weight[:offset, :] dst_state_dict[ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.query.bias" ] = src_att_bias[:offset] dst_state_dict[ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.key.weight" ] = src_att_weight[offset : offset * 2, :] dst_state_dict[ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.key.bias" ] = src_att_bias[offset : offset * 2] dst_state_dict[ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.value.weight" ] = src_att_weight[-offset:, :] dst_state_dict[ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.value.bias" ] = src_att_bias[-offset:] # let's pop them src_state_dict.pop(f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.weight") src_state_dict.pop(f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.bias") # proj renamed_keys.extend( rename_keys_for_weight_bias( f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.proj", f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.output.dense", ) ) # mlp renamed_keys.extend( rename_keys_for_weight_bias( f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.mlp.fc1", f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.intermediate.dense", ) ) renamed_keys.extend( rename_keys_for_weight_bias( f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.mlp.fc2", f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.output.dense", ) ) if layer_idx < num_layers - 1: # patch merging renamed_keys.extend( [ ( f"{src_prefix}.levels.{layer_idx}.downsample.reduction.weight", 
f"{dst_prefix}.encoder.levels.{layer_idx}.downsample.reduction.weight", ), ( f"{src_prefix}.levels.{layer_idx}.downsample.norm.weight", f"{dst_prefix}.encoder.levels.{layer_idx}.downsample.norm.weight", ), ( f"{src_prefix}.levels.{layer_idx}.downsample.norm.bias", f"{dst_prefix}.encoder.levels.{layer_idx}.downsample.norm.bias", ), ] ) # hidden states norms renamed_keys.extend( [ ( f"{src_prefix}.norm{layer_idx}.weight", f"{dst_prefix}.hidden_states_norms.stage{layer_idx+1}.weight", ), ( f"{src_prefix}.norm{layer_idx}.bias", f"{dst_prefix}.hidden_states_norms.stage{layer_idx+1}.bias", ), ] ) self.pop_all(renamed_keys, dst_state_dict, src_state_dict) # Backbone + Pixel Decoder def replace_pixel_module(self, dst_state_dict: StateDict, src_state_dict: StateDict, is_swin: bool): dst_prefix: str = "pixel_level_module.decoder" src_prefix: str = "sem_seg_head.pixel_decoder" if is_swin: self.replace_swin_backbone(dst_state_dict, src_state_dict, self.config) else: self.replace_dinat_backbone(dst_state_dict, src_state_dict, self.config) def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str): return [ (f"{src_prefix}.weight", f"{dst_prefix}.weight"), (f"{src_prefix}.bias", f"{dst_prefix}.bias"), ] def rename_keys_for_self_attn(src_prefix: str, dst_prefix: str): self_attn_keys = [] self_attn_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.attention_weights", f"{dst_prefix}.attention_weights") ) self_attn_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.output_proj", f"{dst_prefix}.output_proj") ) self_attn_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.sampling_offsets", f"{dst_prefix}.sampling_offsets") ) self_attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.value_proj", f"{dst_prefix}.value_proj")) return self_attn_keys def rename_keys_for_encoder_layer(src_prefix: str, dst_prefix: str): encoder_keys = [] encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear1", f"{dst_prefix}.fc1")) encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear2", f"{dst_prefix}.fc2")) encoder_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.norm1", f"{dst_prefix}.self_attn_layer_norm") ) encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm2", f"{dst_prefix}.final_layer_norm")) encoder_keys.extend(rename_keys_for_self_attn(f"{src_prefix}.self_attn", f"{dst_prefix}.self_attn")) return encoder_keys # convolution layer for final features renamed_keys = [ (f"{src_prefix}.adapter_1.weight", f"{dst_prefix}.adapter_1.0.weight"), (f"{src_prefix}.adapter_1.norm.weight", f"{dst_prefix}.adapter_1.1.weight"), (f"{src_prefix}.adapter_1.norm.bias", f"{dst_prefix}.adapter_1.1.bias"), ] renamed_keys.extend( [ (f"{src_prefix}.layer_1.weight", f"{dst_prefix}.layer_1.0.weight"), (f"{src_prefix}.layer_1.norm.weight", f"{dst_prefix}.layer_1.1.weight"), (f"{src_prefix}.layer_1.norm.bias", f"{dst_prefix}.layer_1.1.bias"), ] ) # proj layers for i in range(3): for j in range(2): renamed_keys.extend( [ (f"{src_prefix}.input_proj.{i}.{j}.weight", f"{dst_prefix}.input_projections.{i}.{j}.weight"), (f"{src_prefix}.input_proj.{i}.{j}.bias", f"{dst_prefix}.input_projections.{i}.{j}.bias"), ] ) renamed_keys.extend([(f"{src_prefix}.transformer.level_embed", f"{dst_prefix}.level_embed")]) # layers for layer_idx in range(self.config.encoder_layers): renamed_keys.extend( rename_keys_for_encoder_layer( f"{src_prefix}.transformer.encoder.layers.{layer_idx}", f"{dst_prefix}.encoder.layers.{layer_idx}" ) ) # proj renamed_keys.extend( [ 
(f"{src_prefix}.mask_features.weight", f"{dst_prefix}.mask_projection.weight"), (f"{src_prefix}.mask_features.bias", f"{dst_prefix}.mask_projection.bias"), ] ) self.pop_all(renamed_keys, dst_state_dict, src_state_dict) # Transformer Decoder def replace_keys_qkv_transformer_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "transformer_module.decoder.layers" src_prefix: str = "sem_seg_head.predictor" for i in range(self.config.decoder_layers - 1): # read in weights + bias of input projection layer of self-attention in_proj_weight = src_state_dict.pop( f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.in_proj_weight" ) in_proj_bias = src_state_dict.pop( f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.q_proj.weight"] = in_proj_weight[:256, :] dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.q_proj.bias"] = in_proj_bias[:256] dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.k_proj.weight"] = in_proj_weight[256:512, :] dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.k_proj.bias"] = in_proj_bias[256:512] dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.v_proj.weight"] = in_proj_weight[-256:, :] dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.v_proj.bias"] = in_proj_bias[-256:] def replace_transformer_module(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "transformer_module" src_prefix: str = "sem_seg_head.predictor" def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str): return [ (f"{src_prefix}.weight", f"{dst_prefix}.weight"), (f"{src_prefix}.bias", f"{dst_prefix}.bias"), ] def rename_keys_for_attn(src_prefix: str, dst_prefix: str): attn_keys = [ (f"{src_prefix}.in_proj_bias", f"{dst_prefix}.in_proj_bias"), (f"{src_prefix}.in_proj_weight", f"{dst_prefix}.in_proj_weight"), ] attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.out_proj", f"{dst_prefix}.out_proj")) return attn_keys def rename_keys_for_self_attn(src_prefix: str, dst_prefix: str): attn_keys = [] attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.out_proj", f"{dst_prefix}.out_proj")) return attn_keys def rename_keys_for_query_transformer_layer(src_prefix: str, dst_prefix: str): query_transformer_layer_keys = [] query_transformer_layer_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.linear1", f"{dst_prefix}.linear1") ) query_transformer_layer_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.linear2", f"{dst_prefix}.linear2") ) query_transformer_layer_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.norm1", f"{dst_prefix}.norm1") ) query_transformer_layer_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.norm2", f"{dst_prefix}.norm2") ) query_transformer_layer_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.norm3", f"{dst_prefix}.norm3") ) query_transformer_layer_keys.extend( rename_keys_for_attn(f"{src_prefix}.self_attn", f"{dst_prefix}.self_attn") ) query_transformer_layer_keys.extend( rename_keys_for_attn(f"{src_prefix}.multihead_attn", f"{dst_prefix}.multihead_attn") ) return query_transformer_layer_keys def rename_keys_for_cross_attn_layer(src_prefix: str, dst_prefix: str): cross_attn_layer_keys = [] cross_attn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm", f"{dst_prefix}.norm")) cross_attn_layer_keys.extend( rename_keys_for_attn(f"{src_prefix}.multihead_attn", 
f"{dst_prefix}.multihead_attn") ) return cross_attn_layer_keys def rename_keys_for_self_attn_layer(src_prefix: str, dst_prefix: str): self_attn_layer_keys = [] self_attn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm", f"{dst_prefix}.norm")) self_attn_layer_keys.extend( rename_keys_for_self_attn(f"{src_prefix}.self_attn", f"{dst_prefix}.self_attn") ) return self_attn_layer_keys def rename_keys_for_ffn_layer(src_prefix: str, dst_prefix: str): ffn_layer_keys = [] ffn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear1", f"{dst_prefix}.linear1")) ffn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear2", f"{dst_prefix}.linear2")) ffn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm", f"{dst_prefix}.norm")) return ffn_layer_keys def rename_keys_for_transformer_decoder_layer(src_prefix: str, dst_prefix: str, idx: int): transformer_decoder_layer_keys = [] transformer_decoder_layer_keys.extend( rename_keys_for_cross_attn_layer( f"{src_prefix}.transformer_cross_attention_layers.{idx}", f"{dst_prefix}.{idx}.cross_attn" ) ) transformer_decoder_layer_keys.extend( rename_keys_for_self_attn_layer( f"{src_prefix}.transformer_self_attention_layers.{idx}", f"{dst_prefix}.{idx}.self_attn" ) ) transformer_decoder_layer_keys.extend( rename_keys_for_ffn_layer(f"{src_prefix}.transformer_ffn_layers.{idx}", f"{dst_prefix}.{idx}.ffn") ) return transformer_decoder_layer_keys # positional embedding for object queries renamed_keys = [ (f"{src_prefix}.query_embed.weight", f"{dst_prefix}.queries_embedder.weight"), (f"{src_prefix}.level_embed.weight", f"{dst_prefix}.level_embed.weight"), ] # norm renamed_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.decoder_norm", f"{dst_prefix}.decoder.decoder_norm") ) # proj renamed_keys.extend( rename_keys_for_weight_bias( f"{src_prefix}.class_input_proj", f"{dst_prefix}.decoder.query_input_projection" ) ) renamed_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.class_embed", f"{dst_prefix}.decoder.class_embed") ) for i in range(3): renamed_keys.extend( rename_keys_for_weight_bias( f"{src_prefix}.mask_embed.layers.{i}", f"{dst_prefix}.decoder.mask_embed.layers.{i}.0" ) ) # norm renamed_keys.extend( rename_keys_for_weight_bias( f"{src_prefix}.class_transformer.decoder.norm", f"{dst_prefix}.decoder.query_transformer.decoder.norm" ) ) # transformer to update queries with task tokens for i in range(self.config.query_dec_layers): renamed_keys.extend( rename_keys_for_query_transformer_layer( f"{src_prefix}.class_transformer.decoder.layers.{i}", f"{dst_prefix}.decoder.query_transformer.decoder.layers.{i}", ) ) # decoder layers for i in range(self.config.decoder_layers - 1): renamed_keys.extend( rename_keys_for_transformer_decoder_layer( f"{src_prefix}", f"{dst_prefix}.decoder.layers", i, ) ) self.pop_all(renamed_keys, dst_state_dict, src_state_dict) self.replace_keys_qkv_transformer_decoder(dst_state_dict, src_state_dict) def replace_task_mlp(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "task_encoder" src_prefix: str = "task_mlp" def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str): return [ (f"{src_prefix}.weight", f"{dst_prefix}.weight"), (f"{src_prefix}.bias", f"{dst_prefix}.bias"), ] renamed_keys = [] for i in range(2): renamed_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.layers.{i}", f"{dst_prefix}.task_mlp.layers.{i}.0") ) self.pop_all(renamed_keys, dst_state_dict, src_state_dict) def replace_text_projector(self, dst_state_dict: StateDict, 
src_state_dict: StateDict): dst_prefix: str = "text_mapper.text_projector" src_prefix: str = "text_projector" def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str): return [ (f"{src_prefix}.weight", f"{dst_prefix}.weight"), (f"{src_prefix}.bias", f"{dst_prefix}.bias"), ] renamed_keys = [] for i in range(self.config.text_encoder_config["text_encoder_proj_layers"]): renamed_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.layers.{i}", f"{dst_prefix}.{i}.0")) self.pop_all(renamed_keys, dst_state_dict, src_state_dict) def replace_text_mapper(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "text_mapper.text_encoder" src_prefix: str = "text_encoder" self.replace_text_projector(dst_state_dict, src_state_dict) def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str): return [ (f"{src_prefix}.weight", f"{dst_prefix}.weight"), (f"{src_prefix}.bias", f"{dst_prefix}.bias"), ] def rename_keys_for_attn(src_prefix: str, dst_prefix: str): attn_keys = [ (f"{src_prefix}.in_proj_bias", f"{dst_prefix}.in_proj_bias"), (f"{src_prefix}.in_proj_weight", f"{dst_prefix}.in_proj_weight"), ] attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.out_proj", f"{dst_prefix}.out_proj")) return attn_keys def rename_keys_for_layer(src_prefix: str, dst_prefix: str): resblock_keys = [] resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.mlp.c_fc", f"{dst_prefix}.mlp.fc1")) resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.mlp.c_proj", f"{dst_prefix}.mlp.fc2")) resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.ln_1", f"{dst_prefix}.layer_norm1")) resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.ln_2", f"{dst_prefix}.layer_norm2")) resblock_keys.extend(rename_keys_for_attn(f"{src_prefix}.attn", f"{dst_prefix}.self_attn")) return resblock_keys renamed_keys = [ ("prompt_ctx.weight", "text_mapper.prompt_ctx.weight"), ] renamed_keys.extend( [ (f"{src_prefix}.positional_embedding", f"{dst_prefix}.positional_embedding"), (f"{src_prefix}.token_embedding.weight", f"{dst_prefix}.token_embedding.weight"), ] ) renamed_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.ln_final", f"{dst_prefix}.ln_final")) for i in range(self.config.text_encoder_config["text_encoder_num_layers"]): renamed_keys.extend( rename_keys_for_layer( f"{src_prefix}.transformer.resblocks.{i}", f"{dst_prefix}.transformer.layers.{i}" ) ) self.pop_all(renamed_keys, dst_state_dict, src_state_dict) def convert(self, oneformer: OneFormerModel, is_swin: bool) -> OneFormerModel: dst_state_dict = TrackedStateDict(oneformer.state_dict()) src_state_dict = self.original_model.state_dict() self.replace_pixel_module(dst_state_dict, src_state_dict, is_swin) self.replace_transformer_module(dst_state_dict, src_state_dict) self.replace_task_mlp(dst_state_dict, src_state_dict) if self.config.is_training: self.replace_text_mapper(dst_state_dict, src_state_dict) logger.info(f"Missed keys are {pformat(dst_state_dict.diff())}") logger.info(f"Not copied keys are {pformat(src_state_dict.keys())}") logger.info("🙌 Done") oneformer.load_state_dict(dst_state_dict) return oneformer @staticmethod def using_dirs(checkpoints_dir: Path, config_dir: Path) -> Iterator[Tuple[object, Path, Path]]: checkpoints: List[Path] = checkpoints_dir.glob("**/*.pth") for checkpoint in checkpoints: logger.info(f"💪 Converting {checkpoint.stem}") # find associated config file config: Path = config_dir / f"{checkpoint.stem}.yaml" yield config, checkpoint def post_process_sem_seg_output(outputs: 
OneFormerForUniversalSegmentationOutput, target_size: Tuple[int, int]): # class_queries_logits has shape [BATCH, QUERIES, CLASSES + 1] class_queries_logits = outputs.class_queries_logits # masks_queries_logits has shape [BATCH, QUERIES, HEIGHT, WIDTH] masks_queries_logits = outputs.masks_queries_logits if target_size is not None: masks_queries_logits = torch.nn.functional.interpolate( masks_queries_logits, size=target_size, mode="bilinear", align_corners=False, ) # remove the null class `[..., :-1]` masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] # mask probs has shape [BATCH, QUERIES, HEIGHT, WIDTH] masks_probs = masks_queries_logits.sigmoid() # now we want to sum over the queries, # $ out_{c,h,w} = \sum_q p_{q,c} * m_{q,h,w} $ # where $ softmax(p) \in R^{q, c} $ is the mask classes # and $ sigmoid(m) \in R^{q, h, w}$ is the mask probabilities # b(atch)q(uery)c(lasses), b(atch)q(uery)h(eight)w(idth) segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) return segmentation def test( original_model, our_model: OneFormerForUniversalSegmentation, processor: OneFormerProcessor, model_repo: str, ): def _preprocess_text(text_list=None, max_length=77): if text_list is None: raise ValueError("tokens cannot be None.") tokens = tokenizer(text_list, padding="max_length", max_length=max_length, truncation=True) attention_masks, input_ids = tokens["attention_mask"], tokens["input_ids"] token_inputs = [] for attn_mask, input_id in zip(attention_masks, input_ids): token = torch.tensor(attn_mask) * torch.tensor(input_id) token_inputs.append(token.unsqueeze(0)) token_inputs = torch.cat(token_inputs, dim=0) return token_inputs with torch.no_grad(): tokenizer = CLIPTokenizer.from_pretrained(model_repo) original_model = original_model.eval() our_model = our_model.eval() im = prepare_img() tr = T.Compose( [ T.Resize((640, 640)), T.ToTensor(), T.Normalize( mean=torch.tensor([123.675, 116.280, 103.530]) / 255.0, std=torch.tensor([58.395, 57.120, 57.375]) / 255.0, ), ], ) x = tr(im).unsqueeze(0) task_input = ["the task is semantic"] task_token = _preprocess_text(task_input, max_length=processor.task_seq_length) original_model_backbone_features = original_model.backbone(x.clone()) our_model_output: OneFormerModelOutput = our_model.model(x.clone(), task_token, output_hidden_states=True) for original_model_feature, our_model_feature in zip( original_model_backbone_features.values(), our_model_output.encoder_hidden_states ): assert torch.allclose( original_model_feature, our_model_feature, atol=3e-3 ), "The backbone features are not the same." 
mask_features, _, multi_scale_features, _, _ = original_model.sem_seg_head.pixel_decoder.forward_features( original_model_backbone_features ) original_pixel_decoder_features = [] original_pixel_decoder_features.append(mask_features) for i in range(len(multi_scale_features)): original_pixel_decoder_features.append(multi_scale_features[i]) for original_model_feature, our_model_feature in zip( original_pixel_decoder_features, our_model_output.pixel_decoder_hidden_states ): assert torch.allclose( original_model_feature, our_model_feature, atol=3e-4 ), "The pixel decoder features are not the same." tr_complete = T.Compose( [ T.Resize((640, 640)), T.ToTensor(), ], ) y = (tr_complete(im) * 255.0).to(torch.int).float() # let's test the full model original_model_out = original_model([{"image": y.clone(), "task": "The task is semantic"}]) original_segmentation = original_model_out[0]["sem_seg"] our_model_out: OneFormerForUniversalSegmentationOutput = our_model( x.clone(), task_token, output_hidden_states=True ) our_segmentation = post_process_sem_seg_output(our_model_out, target_size=(640, 640))[0] assert torch.allclose( original_segmentation, our_segmentation, atol=1e-3 ), "The segmentation image is not the same." logger.info("✅ Test passed!") def get_name(checkpoint_file: Path): model_name_raw: str = checkpoint_file.stem backbone = "swin" if "swin" in model_name_raw else "dinat" dataset = "" if "coco" in model_name_raw: dataset = "coco" elif "ade20k" in model_name_raw: dataset = "ade20k" elif "cityscapes" in model_name_raw: dataset = "cityscapes" else: raise ValueError( f"{model_name_raw} must be wrong since we didn't find 'coco', 'ade20k' or 'cityscapes' in it" ) backbone_types = ["tiny", "large"] backbone_type = list(filter(lambda x: x in model_name_raw, backbone_types))[0] model_name = f"oneformer_{dataset}_{backbone}_{backbone_type}" return model_name if __name__ == "__main__": parser = ArgumentParser( description=( "Command line to convert the original oneformer models (with swin or dinat backbone) to the" " transformers implementation." ) ) parser.add_argument( "--checkpoints_dir", type=Path, help=( "A directory containing the model's checkpoints. The directory has to have the following structure:" " <DIR_NAME>/<DATASET_NAME>/<CONFIG_NAME>.pth; where <CONFIG_NAME> must follow the nomenclature" " oneformer_<DATASET_NAME>_<BACKBONE>_<BACKBONE_TYPE>" ), ) parser.add_argument( "--configs_dir", type=Path, help=( "A directory containing the model's configs, see detectron2 doc. The directory has to have the following" " structure: <DIR_NAME>/<DATASET_NAME>/<CONFIG_NAME>.yaml; where <CONFIG_NAME> must follow the" " nomenclature oneformer_<DATASET_NAME>_<BACKBONE>_<BACKBONE_TYPE>" ), ) parser.add_argument( "--pytorch_dump_folder_path", required=True, type=Path, help="Path to the folder to output PyTorch models.", ) parser.add_argument( "--oneformer_dir", required=True, type=Path, help=( "A path to OneFormer's original implementation directory. 
You can download from here: " "https://github.com/SHI-Labs/OneFormer" ), ) args = parser.parse_args() checkpoints_dir: Path = args.checkpoints_dir config_dir: Path = args.configs_dir save_directory: Path = args.pytorch_dump_folder_path oneformer_dir: Path = args.oneformer_dir # append the path to the parents to oneformer dir sys.path.append(str(oneformer_dir.parent)) # and import what's needed from OneFormer.oneformer import add_common_config, add_dinat_config, add_oneformer_config, add_swin_config from OneFormer.oneformer.oneformer_model import OneFormer as OriginalOneFormer if not save_directory.exists(): save_directory.mkdir(parents=True) for config_file, checkpoint_file in OriginalOneFormerCheckpointToOursConverter.using_dirs( checkpoints_dir, config_dir ): processor = OriginalOneFormerConfigToProcessorConverter()( setup_cfg(Args(config_file=config_file)), os.path.join("shi-labs", config_file.stem) ) original_config = setup_cfg(Args(config_file=config_file)) oneformer_kwargs = OriginalOneFormer.from_config(original_config) original_model = OriginalOneFormer(**oneformer_kwargs).eval() DetectionCheckpointer(original_model).load(str(checkpoint_file)) is_swin = "swin" in config_file.stem config: OneFormerConfig = OriginalOneFormerConfigToOursConverter()(original_config, is_swin) oneformer = OneFormerModel(config=config).eval() converter = OriginalOneFormerCheckpointToOursConverter(original_model, config) oneformer = converter.convert(oneformer, is_swin) oneformer_for_universal_segmentation = OneFormerForUniversalSegmentation(config=config).eval() oneformer_for_universal_segmentation.model = oneformer test( original_model, oneformer_for_universal_segmentation, processor, os.path.join("shi-labs", config_file.stem), ) model_name = get_name(checkpoint_file) logger.info(f"🪄 Saving {model_name}") processor.save_pretrained(save_directory / model_name) oneformer_for_universal_segmentation.save_pretrained(save_directory / model_name) processor.push_to_hub( repo_id=os.path.join("shi-labs", config_file.stem), commit_message="Add configs", use_temp_dir=True, ) oneformer_for_universal_segmentation.push_to_hub( repo_id=os.path.join("shi-labs", config_file.stem), commit_message="Add model", use_temp_dir=True, )
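# The converters above repeatedly slice a fused qkv projection from the original checkpoints
# into separate query/key/value tensors (see `replace_swin_backbone`, `replace_dinat_backbone`
# and `replace_keys_qkv_transformer_decoder`). Below is a minimal standalone sketch of that
# pattern: a fused `(3 * dim, dim)` weight and `(3 * dim,)` bias are split into equal thirds.
# The helper name is illustrative only and is not part of the original script.
def split_fused_qkv(weight: Tensor, bias: Tensor) -> Tuple[Tuple[Tensor, Tensor], ...]:
    # A fused projection stacks q, k and v along dim 0, so each part is one third of it.
    offset = weight.shape[0] // 3
    query = (weight[:offset, :], bias[:offset])
    key = (weight[offset : 2 * offset, :], bias[offset : 2 * offset])
    value = (weight[2 * offset :, :], bias[2 * offset :])
    return query, key, value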
transformers/src/transformers/models/oneformer/convert_to_hf_oneformer.py/0
{ "file_path": "transformers/src/transformers/models/oneformer/convert_to_hf_oneformer.py", "repo_id": "transformers", "token_count": 26214 }
382
# coding=utf-8 # Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 OPT model.""" from __future__ import annotations from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutputWithPast, TFCausalLMOutputWithPast # Public API from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, TFSharedEmbeddings, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_opt import OPTConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "facebook/opt-350m" _CONFIG_FOR_DOC = "OPTConfig" # Base model docstring _EXPECTED_OUTPUT_SHAPE = [1, 8, 1024] # Causal LM output _CAUSAL_LM_EXPECTED_OUTPUT = ( "Hey, are you conscious? Can you talk to me?\nI'm not conscious. I'm just a little bit of a weirdo." ) LARGE_NEGATIVE = -1e8 def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0): """ Make causal mask used for bi-directional self-attention. """ bsz = input_ids_shape[0] tgt_len = input_ids_shape[1] # We need triu with k = 1 but TF expects known compile-time dims for that, so we hack around it mask = tf.fill((tgt_len, tgt_len), tf.cast(LARGE_NEGATIVE, tf.float32)) mask = tf.linalg.band_part(mask, 0, -1) - tf.linalg.band_part(mask, 0, 0) if past_key_values_length > 0: mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1) return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1)) # Copied from transformers.models.bart.modeling_tf_bart._expand_mask def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE class TFOPTLearnedPositionalEmbedding(keras.layers.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs): # OPT is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. 
Other models don't have this hack self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim, **kwargs) def call(self, attention_mask, past_key_values_length: int = 0): """`input_ids_shape` is expected to be [bsz x seqlen].""" attention_mask = tf.cast(attention_mask, tf.int64) # create positions depending on attention_mask positions = tf.math.cumsum(attention_mask, axis=1) * attention_mask - 1 # cut positions if `past_key_values_length` is > 0 positions = positions[:, past_key_values_length:] return super().call(positions + self.offset) # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->OPT class TFOPTAttention(keras.layers.Layer): """Multi-headed attention from "Attention Is All You Need""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, **kwargs, ): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = keras.layers.Dropout(dropout) self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj") self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj") self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj") self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj") def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) def call( self, hidden_states: tf.Tensor, key_value_states: tf.Tensor | None = None, past_key_value: Tuple[Tuple[tf.Tensor]] | None = None, attention_mask: tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, training: Optional[bool] = False, ) -> Tuple[tf.Tensor, tf.Tensor | None]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = shape_list(hidden_states) # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = tf.concat([past_key_value[0], key_states], axis=2) value_states = tf.concat([past_key_value[1], value_states], axis=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) key_states = tf.reshape(key_states, proj_shape) value_states = tf.reshape(value_states, proj_shape) src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) tf.debugging.assert_equal( shape_list(attn_weights), [bsz * self.num_heads, tgt_len, src_len], message=( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {shape_list(attn_weights)}" ), ) if attention_mask is not None: tf.debugging.assert_equal( shape_list(attention_mask), [bsz, 1, tgt_len, src_len], message=( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" {shape_list(attention_mask)}" ), ) attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: tf.debugging.assert_equal( shape_list(layer_head_mask), [self.num_heads], message=( f"Head mask for a single layer should be of size {(self.num_heads)}, but is" f" {shape_list(layer_head_mask)}" ), ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) ) attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) tf.debugging.assert_equal( shape_list(attn_output), [bsz * self.num_heads, tgt_len, self.head_dim], message=( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {shape_list(attn_output)}" ), ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) ) attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) attn_output = self.out_proj(attn_output) attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) return attn_output, attn_weights, past_key_value def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "k_proj", None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.embed_dim]) if getattr(self, "q_proj", None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.embed_dim]) if getattr(self, "v_proj", None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.embed_dim]) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.embed_dim]) class TFOPTDecoderLayer(keras.layers.Layer): def __init__(self, config: OPTConfig, **kwargs): super().__init__(**kwargs) self.do_layer_norm_before = config.do_layer_norm_before self.embed_dim = config.hidden_size self.self_attn = TFOPTAttention( embed_dim=self.embed_dim, num_heads=config.num_attention_heads, dropout=config.attention_dropout, name="self_attn", is_decoder=True, ) self.dropout = 
keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") self.fc1 = keras.layers.Dense(config.ffn_dim, name="fc1") self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2") self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") self.config = config def call( self, hidden_states: tf.Tensor, attention_mask: np.ndarray | tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, past_key_value: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, training: Optional[bool] = False, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]: """ Args: hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`tf.Tensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`tf.Tensor`, *optional*): mask for attention heads in a given layer of size `(decoder_attention_heads,)` past_key_value (`Tuple(tf.Tensor)`, *optional*): cached past key and value projection states training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ residual = hidden_states # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention if self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states # 350m applies layer norm AFTER attention if not self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) # Fully Connected residual = hidden_states # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention if self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states # 350m applies layer norm AFTER attention if not self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) return (hidden_states, self_attn_weights, present_key_value) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attn", None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, "self_attn_layer_norm", None) is not None: with tf.name_scope(self.self_attn_layer_norm.name): self.self_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, "fc1", None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.embed_dim]) if getattr(self, "fc2", None) is not None: with tf.name_scope(self.fc2.name): 
self.fc2.build([None, None, self.config.ffn_dim]) if getattr(self, "final_layer_norm", None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim]) OPT_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Args: config ([`OPTConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ @add_start_docstrings( "The bare OPT Model outputting raw hidden-states without any specific head on top.", OPT_START_DOCSTRING, ) class TFOPTPreTrainedModel(TFPreTrainedModel): """ TFOPT Pretrained Model that inheritates from transformers.TFPreTrainedModel Args: config: OPTConfig """ config_class = OPTConfig base_model_prefix = "model" OPT_INPUTS_DOCSTRING = r""" Args: input_ids (`tf.Tensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). Set to `False` during training, `True` during generation output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). 
""" @keras_serializable class TFOPTDecoder(keras.layers.Layer): config_class = OPTConfig def __init__(self, config: OPTConfig, **kwargs): super().__init__(**kwargs) self.config = config self.padding_idx = config.pad_token_id self.layerdrop = config.layerdrop num_embeddings = config.max_position_embeddings self.embed_tokens = TFSharedEmbeddings( config.vocab_size, config.word_embed_proj_dim, config.pad_token_id, name="embed_tokens" ) self.embed_positions = TFOPTLearnedPositionalEmbedding( num_embeddings, config.hidden_size, name="embed_positions", ) # Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility # with checkpoints that have been fine-tuned before transformers v4.20.1 # see https://github.com/facebookresearch/metaseq/pull/164 if config.do_layer_norm_before and not config._remove_final_layer_norm: self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") else: self.final_layer_norm = None if config.word_embed_proj_dim != config.hidden_size: self.project_out = keras.layers.Dense(config.word_embed_proj_dim, name="project_out", use_bias=False) self.project_in = keras.layers.Dense(config.hidden_size, name="project_in", use_bias=False) else: self.project_in = None self.project_out = None self.layers = [TFOPTDecoderLayer(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)] self.dropout = keras.layers.Dropout(config.dropout) def get_embed_tokens(self): return self.embed_tokens def set_embed_tokens(self, embed_tokens): self.embed_tokens = embed_tokens def set_input_embeddings(self, new_embeddings): self.embed_tokens.vocab_size = new_embeddings.shape[0] self.embed_tokens.weight = new_embeddings def get_input_embeddings(self): return self.embed_tokens def _prepare_decoder_attention_mask(self, attention_mask, input_shape, past_key_values_length): # create causal mask # # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] _, seq_length = input_shape tf.debugging.assert_equal( seq_length + past_key_values_length, shape_list(attention_mask)[1], message="Attention mask shape should be (batch_size, seq_length + past_key_values_length)" f" but is {shape_list(attention_mask)[1]} with input_ids shape {input_shape} and past length" f" {past_key_values_length}.", ) expanded_attn_mask = _expand_mask(attention_mask, tgt_len=input_shape[-1]) if seq_length > 1: combined_attention_mask = ( _make_causal_mask(input_shape, past_key_values_length=past_key_values_length) + expanded_attn_mask ) else: combined_attention_mask = expanded_attn_mask return combined_attention_mask @unpack_inputs def call( self, input_ids: TFModelInputType | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]: r""" Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
[What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0 if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.embed_tokens.vocab_size) inputs_embeds = self.embed_tokens(input_ids) if attention_mask is None: attention_mask = tf.ones((input_shape[0], input_shape[1] + past_key_values_length), dtype=tf.bool) else: tf.debugging.assert_equal( shape_list(attention_mask)[1], past_key_values_length + input_shape[1], message=( f"The provided attention mask has length {tf.shape(attention_mask)[1]}, but its length should be " f"{past_key_values_length + input_shape[1]} (sum of the lengths of current and past inputs)" ), ) pos_embeds = self.embed_positions(attention_mask, past_key_values_length) attention_mask = self._prepare_decoder_attention_mask(attention_mask, input_shape, past_key_values_length) if self.project_in is not None: inputs_embeds = self.project_in(inputs_embeds) hidden_states = inputs_embeds + pos_embeds # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None present_key_values = () if use_cache else None # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired for attn_mask_name, attn_mask in [("head_mask", head_mask)]: if attn_mask is not None: tf.debugging.assert_equal( shape_list(attn_mask)[0], len(self.layers), message=( f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for" f" {shape_list(attn_mask)[0]}." 
), ) for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) past_key_value = past_key_values[idx] if past_key_values is not None else None hidden_states, layer_self_attn, present_key_value = decoder_layer( hidden_states, attention_mask=attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, past_key_value=past_key_value, ) if use_cache: present_key_values += (present_key_value,) if output_attentions: all_self_attns += (layer_self_attn,) if self.final_layer_norm is not None: hidden_states = self.final_layer_norm(hidden_states) if self.project_out is not None: hidden_states = self.project_out(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, present_key_values, all_hidden_states, all_self_attns] if v is not None ) else: return TFBaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=present_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embed_tokens", None) is not None: with tf.name_scope(self.embed_tokens.name): self.embed_tokens.build(None) if getattr(self, "embed_positions", None) is not None: with tf.name_scope(self.embed_positions.name): self.embed_positions.build(None) if getattr(self, "final_layer_norm", None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.config.hidden_size]) if getattr(self, "project_out", None) is not None: with tf.name_scope(self.project_out.name): self.project_out.build([None, None, self.config.hidden_size]) if getattr(self, "project_in", None) is not None: with tf.name_scope(self.project_in.name): self.project_in.build([None, None, self.config.word_embed_proj_dim]) if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFOPTMainLayer(keras.layers.Layer): config_class = OPTConfig def __init__(self, config: OPTConfig, **kwargs): super().__init__(**kwargs) self.config = config self.decoder = TFOPTDecoder(config, name="decoder") def get_input_embeddings(self): return self.decoder.embed_tokens def set_input_embeddings(self, new_embeddings): self.decoder.set_input_embeddings(new_embeddings) @unpack_inputs def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, **kwargs, ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.decoder( input_ids, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, 
output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if not return_dict: return outputs return TFBaseModelOutputWithPast( last_hidden_state=outputs.last_hidden_state, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "decoder", None) is not None: with tf.name_scope(self.decoder.name): self.decoder.build(None) @add_start_docstrings( "The bare TF OPT Model outputting raw hidden-states without any specific head on top.", OPT_START_DOCSTRING, ) @keras_serializable class TFOPTModel(TFOPTPreTrainedModel): config_class = OPTConfig def __init__(self, config: OPTConfig, **kwargs): super().__init__(config, **kwargs) self.config = config self.model = TFOPTMainLayer(config, name="model") def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, new_embeddings): self.model.set_input_embeddings(new_embeddings) @unpack_inputs @add_start_docstrings_to_model_forward(OPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC, expected_output=_EXPECTED_OUTPUT_SHAPE, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, **kwargs, ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model( input_ids, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if not return_dict: return outputs return TFBaseModelOutputWithPast( last_hidden_state=outputs.last_hidden_state, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFBaseModelOutputWithPast( last_hidden_state=output.last_hidden_state, past_key_values=pkv, hidden_states=hs, attentions=attns, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "model", None) is not None: with tf.name_scope(self.model.name): self.model.build(None) @add_start_docstrings( """ The OPT Model transformer with a language modeling head on top. 
""", OPT_START_DOCSTRING, ) @keras_serializable class TFOPTForCausalLM(TFOPTPreTrainedModel, TFCausalLanguageModelingLoss): config_class = OPTConfig def __init__(self, config: OPTConfig, **kwargs): super().__init__(config, **kwargs) self.config = config self.model = TFOPTMainLayer(config, name="model") def get_output_embeddings(self): return self.model.get_input_embeddings() def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs): attention_mask = kwargs.get("attention_mask", None) # only last token for inputs_ids if past is defined in kwargs if past_key_values: inputs = tf.expand_dims(inputs[:, -1], -1) return { "input_ids": inputs, "attention_mask": attention_mask, "past_key_values": past_key_values, "use_cache": use_cache, } @unpack_inputs @replace_return_docstrings(output_type=TFCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC, expected_output=_CAUSAL_LM_EXPECTED_OUTPUT, ) def call( self, input_ids: TFModelInputType | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, labels: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, **kwargs, ) -> Union[TFCausalLMOutputWithPast, Tuple[tf.Tensor]]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. 
If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model( input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) logits = self.model.decoder.embed_tokens(outputs[0], mode="linear") loss = None if labels is not None: # shift labels to the left and cut last logit token shifted_logits = logits[:, :-1] labels = labels[:, 1:] loss = self.hf_compute_loss(labels, shifted_logits) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFCausalLMOutputWithPast( past_key_values=pkv, hidden_states=hs, attentions=attns, loss=output.loss, logits=output.logits, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "model", None) is not None: with tf.name_scope(self.model.name): self.model.build(None)
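A minimal usage sketch for the causal LM head defined above, assuming the "facebook/opt-350m" checkpoint already referenced in _CHECKPOINT_FOR_DOC:

from transformers import AutoTokenizer, TFOPTForCausalLM

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
model = TFOPTForCausalLM.from_pretrained("facebook/opt-350m")

inputs = tokenizer("Hey, are you conscious? Can you talk to me?", return_tensors="tf")
# With use_cache=True (the default), past_key_values are threaded through
# prepare_inputs_for_generation so each decoding step feeds only the newest token.
generated = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])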
transformers/src/transformers/models/opt/modeling_tf_opt.py
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert PaliGemma checkpoints from the original repository.""" import argparse import collections import torch from numpy import load from transformers import ( AutoTokenizer, GemmaTokenizer, GemmaTokenizerFast, PaliGemmaConfig, PaliGemmaForConditionalGeneration, PaliGemmaProcessor, SiglipImageProcessor, ) from transformers.tokenization_utils_base import AddedToken from transformers.utils import logging device = "cuda" # "cpu" logging.set_verbosity_info() logger = logging.get_logger(__name__) # TODO add sequence length variations here PALIGEMMA_VARIANTS = ["2b-test", "3b-224px", "3b-448px", "3b-896px"] def get_paligemma_config(variant: str, precision: str): config = { "image_token_index": None, "pad_token_id": 0, "bos_token_id": 2, "eos_token_id": 1, } image_sizes = {"2b-test": 224, "3b-224px": 224, "3b-448px": 448, "3b-896px": 896} if variant in PALIGEMMA_VARIANTS: image_size = image_sizes[variant] patch_size = 14 num_image_tokens = (image_size**2) // (patch_size**2) config["image_token_index"] = 257152 if variant != "2b-test" else 256000 text_config = { "vocab_size": 257152, "num_hidden_layers": 18, "num_key_value_heads": 1, "head_dim": 256, "torch_dtype": precision, "hidden_size": 2048, "hidden_activation": "gelu_pytorch_tanh", "num_attention_heads": 8, "intermediate_size": 16384, "is_encoder_decoder": False, } vision_config = { "torch_dtype": precision, "image_size": image_size, "patch_size": patch_size, "num_image_tokens": num_image_tokens, "hidden_size": 1152, "intermediate_size": 4304, "num_hidden_layers": 27, "num_attention_heads": 16, "projector_hidden_act": "gelu_fast", "vision_use_head": False, } final_config = PaliGemmaConfig(text_config=text_config, vision_config=vision_config, **config) else: raise ValueError(f"Identifier {variant} not supported. Available: {PALIGEMMA_VARIANTS}") return final_config def slice_state_dict(state_dict, config): # fmt: off # patch embeddings state_dict["vision_tower.vision_model.embeddings.patch_embedding.weight"] = state_dict.pop("img/embedding/kernel").transpose( 3, 2, 0, 1 ) state_dict["vision_tower.vision_model.embeddings.patch_embedding.bias"] = state_dict.pop("img/embedding/bias") # positional embeddings state_dict["vision_tower.vision_model.embeddings.position_embedding.weight"] = state_dict.pop("img/pos_embedding").reshape( -1, config.vision_config.hidden_size ) # extract vision layers to be sliced at index 0. There are 27 layers in the base model. 
encoderblock_layernorm0_scale = state_dict.pop("img/Transformer/encoderblock/LayerNorm_0/scale") encoderblock_layernorm0_bias = state_dict.pop("img/Transformer/encoderblock/LayerNorm_0/bias") encoderblock_layernorm1_scale = state_dict.pop("img/Transformer/encoderblock/LayerNorm_1/scale") encoderblock_layernorm1_bias = state_dict.pop("img/Transformer/encoderblock/LayerNorm_1/bias") encoderblock_mlp_dense0_kernel= state_dict.pop("img/Transformer/encoderblock/MlpBlock_0/Dense_0/kernel") encoderblock_mlp_dense0_bias= state_dict.pop("img/Transformer/encoderblock/MlpBlock_0/Dense_0/bias") encoderblock_mlp_dense1_kernel= state_dict.pop("img/Transformer/encoderblock/MlpBlock_0/Dense_1/kernel") encoderblock_mlp_dense1_bias= state_dict.pop("img/Transformer/encoderblock/MlpBlock_0/Dense_1/bias") encoderblock_attention_0_key_kernel = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/key/kernel") encoderblock_attention_0_key_bias = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/key/bias") encoderblock_attention_0_value_kernel = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/value/kernel") encoderblock_attention_0_value_bias = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/value/bias") encoderblock_attention_0_query_kernel = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/query/kernel") encoderblock_attention_0_query_bias = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/query/bias") encoderblock_attention_0_out_kernel = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/out/kernel") encoderblock_attention_0_out_bias = state_dict.pop("img/Transformer/encoderblock/MultiHeadDotProductAttention_0/out/bias") for i in range(config.vision_config.num_hidden_layers): state_dict[f"vision_tower.vision_model.encoder.layers.{i}.layer_norm1.weight"] = encoderblock_layernorm0_scale[i].transpose() state_dict[f"vision_tower.vision_model.encoder.layers.{i}.layer_norm1.bias"] = encoderblock_layernorm0_bias[i] state_dict[f"vision_tower.vision_model.encoder.layers.{i}.layer_norm2.weight"] = encoderblock_layernorm1_scale[i].transpose() state_dict[f"vision_tower.vision_model.encoder.layers.{i}.layer_norm2.bias"] = encoderblock_layernorm1_bias[i] state_dict[f"vision_tower.vision_model.encoder.layers.{i}.mlp.fc1.weight"] = encoderblock_mlp_dense0_kernel[i].transpose() state_dict[f"vision_tower.vision_model.encoder.layers.{i}.mlp.fc1.bias"] = encoderblock_mlp_dense0_bias[i] state_dict[f"vision_tower.vision_model.encoder.layers.{i}.mlp.fc2.weight"] = encoderblock_mlp_dense1_kernel[i].transpose() state_dict[f"vision_tower.vision_model.encoder.layers.{i}.mlp.fc2.bias"] = encoderblock_mlp_dense1_bias[i] state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.k_proj.weight"] = encoderblock_attention_0_key_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose() state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.k_proj.bias"] = encoderblock_attention_0_key_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1) state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.v_proj.weight"] = encoderblock_attention_0_value_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose() state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.v_proj.bias"] = encoderblock_attention_0_value_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1) 
state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.q_proj.weight"] = encoderblock_attention_0_query_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose() state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.q_proj.bias"] = encoderblock_attention_0_query_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1) state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.out_proj.weight"] = encoderblock_attention_0_out_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose() state_dict[f"vision_tower.vision_model.encoder.layers.{i}.self_attn.out_proj.bias"] = encoderblock_attention_0_out_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1) state_dict["vision_tower.vision_model.post_layernorm.weight"] = state_dict.pop("img/Transformer/encoder_norm/scale").transpose() state_dict["vision_tower.vision_model.post_layernorm.bias"] = state_dict.pop("img/Transformer/encoder_norm/bias") # multimodal projector state_dict['multi_modal_projector.linear.weight'] = state_dict.pop("img/head/kernel").transpose() state_dict['multi_modal_projector.linear.bias'] = state_dict.pop("img/head/bias") # text decoder (gemma) embedding_vector = state_dict.pop("llm/embedder/input_embedding") state_dict["language_model.model.embed_tokens.weight"] = embedding_vector # pop the einsum attention + mlp representations. There are 18 layers in gemma-2b. llm_attention_attn_vec_einsum = state_dict.pop("llm/layers/attn/attn_vec_einsum/w") llm_attention_kv_einsum = state_dict.pop("llm/layers/attn/kv_einsum/w") llm_attention_q_einsum = state_dict.pop("llm/layers/attn/q_einsum/w") llm_mlp_gating_einsum = state_dict.pop("llm/layers/mlp/gating_einsum") llm_mlp_linear = state_dict.pop("llm/layers/mlp/linear") # TODO verify correctness of layer norm loading llm_input_layernorm = state_dict.pop("llm/layers/pre_attention_norm/scale") llm_post_attention_layernorm = state_dict.pop("llm/layers/pre_ffw_norm/scale") for i in range(config.text_config.num_hidden_layers): # llm_attention_q_einsum[i].shape = (8, 2048, 256) q_proj_weight_reshaped = llm_attention_q_einsum[i].transpose(0, 2, 1).reshape(config.text_config.num_attention_heads * config.text_config.head_dim, config.text_config.hidden_size) state_dict[f"language_model.model.layers.{i}.self_attn.q_proj.weight"] = q_proj_weight_reshaped # llm_attention_kv_einsum[i, 0, 0].shape = (2048, 256) k_proj_weight_reshaped = llm_attention_kv_einsum[i, 0, 0].transpose() state_dict[f"language_model.model.layers.{i}.self_attn.k_proj.weight"] = k_proj_weight_reshaped # llm_attention_kv_einsum[i, 1, 0].shape = (2048, 256) v_proj_weight_reshaped = llm_attention_kv_einsum[i, 1, 0].transpose() state_dict[f"language_model.model.layers.{i}.self_attn.v_proj.weight"] = v_proj_weight_reshaped # output projection. 
# llm_attention_attn_vec_einsum[i].shape = (8, 256, 2048) o_proj_weight_reshaped = llm_attention_attn_vec_einsum[i].transpose(2, 0, 1).reshape(config.text_config.num_attention_heads * config.text_config.head_dim, config.text_config.hidden_size) state_dict[f"language_model.model.layers.{i}.self_attn.o_proj.weight"] = o_proj_weight_reshaped # mlp layers gate_proj_weight = llm_mlp_gating_einsum[i, 0] state_dict[f"language_model.model.layers.{i}.mlp.gate_proj.weight"] = gate_proj_weight.transpose() up_proj_weight = llm_mlp_gating_einsum[i, 1] state_dict[f"language_model.model.layers.{i}.mlp.up_proj.weight"] = up_proj_weight.transpose() state_dict[f"language_model.model.layers.{i}.mlp.down_proj.weight"] = llm_mlp_linear[i].transpose() state_dict[f"language_model.model.layers.{i}.input_layernorm.weight"] = llm_input_layernorm[i] state_dict[f"language_model.model.layers.{i}.post_attention_layernorm.weight"] = llm_post_attention_layernorm[i] state_dict["language_model.model.norm.weight"] = state_dict.pop("llm/final_norm/scale") state_dict["language_model.lm_head.weight"] = embedding_vector # weights are tied. # fmt: on for key, value in state_dict.items(): state_dict[key] = torch.from_numpy(value) return state_dict def flatten_nested_dict(params, parent_key="", sep="/"): items = [] for k, v in params.items(): k = k.removeprefix("params/") new_key = parent_key + sep + k if parent_key else k if isinstance(v, collections.abc.MutableMapping): items.extend(flatten_nested_dict(v, parent_key=new_key, sep=sep).items()) else: items.append((new_key, v)) return dict(items) @torch.no_grad() def convert_paligemma_checkpoint( checkpoint_path, tokenizer_model_file, pytorch_dump_folder_path, variant: str, precision: str, do_convert_weights=False, ): """ Read checkpoints from flax npz files, rename/reshape, send result to state dict and verify logits if needed. """ config = get_paligemma_config(variant, precision=precision) if do_convert_weights: if variant == "2b-test": # for the test model, the vocabulary was smaller tokenizer_id = "google/gemma-2b" tokenizer = AutoTokenizer.from_pretrained(tokenizer_id) else: tokenizer_class = GemmaTokenizer if GemmaTokenizerFast is None else GemmaTokenizerFast tokenizer = tokenizer_class(tokenizer_model_file) image_token = AddedToken("<image>", normalized=False, special=True) tokens_to_add = {"additional_special_tokens": [image_token]} tokenizer.add_special_tokens(tokens_to_add) # tokenizer.padding_side = 'right' # uncomment for testing purposes only. 
image_processor = SiglipImageProcessor.from_pretrained("google/siglip-so400m-patch14-384") image_processor.size = {"width": config.vision_config.image_size, "height": config.vision_config.image_size} image_processor.image_seq_length = config.vision_config.num_image_tokens processor = PaliGemmaProcessor(image_processor=image_processor, tokenizer=tokenizer) data = load(checkpoint_path) state_dict = flatten_nested_dict(data) del data state_dict_transformers = slice_state_dict(state_dict, config) del state_dict model = PaliGemmaForConditionalGeneration(config).to(device).eval() model.load_state_dict(state_dict_transformers) del state_dict_transformers else: processor = PaliGemmaProcessor.from_pretrained(pytorch_dump_folder_path) model = ( PaliGemmaForConditionalGeneration.from_pretrained(pytorch_dump_folder_path, attn_implementation="sdpa") .to(device) .eval() ) model.config.text_config._attn_implementation = "sdpa" # model expansion to get random embeds of image tokens pad_shape = 64 # for performance reasons pre_expansion_embeddings = model.language_model.model.embed_tokens.weight.data mu = torch.mean(pre_expansion_embeddings, dim=0).float() n = pre_expansion_embeddings.size()[0] sigma = ((pre_expansion_embeddings - mu).T @ (pre_expansion_embeddings - mu)) / n dist = torch.distributions.multivariate_normal.MultivariateNormal(mu, covariance_matrix=1e-5 * sigma) # We add an image token so we resize the model model.resize_token_embeddings(config.text_config.vocab_size + 2, pad_shape) model.language_model.model.embed_tokens.weight.data[257152:] = torch.stack( tuple((dist.sample() for _ in range(model.language_model.model.embed_tokens.weight.data[257152:].shape[0]))), dim=0, ) model.language_model.lm_head.weight.data[257152:] = torch.stack( tuple((dist.sample() for _ in range(model.language_model.lm_head.weight.data[257152:].shape[0]))), dim=0, ) model.save_pretrained(pytorch_dump_folder_path, max_shard_size="2GB", safe_serialization=True) processor.save_pretrained(pytorch_dump_folder_path) # if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", required=True, type=str, help="Path to the .npz checkpoint", ) parser.add_argument( "--tokenizer_model_file", required=True, type=str, help="Path to the sentencepiece tokenizer.model file", ) parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output directory where model and processor will be saved.", ) parser.add_argument( "--precision", choices=["float32", "bfloat16", "float16"], type=str, help="Precision identifier for model conversion - should match the base checkpoint precision.", ) parser.add_argument( "--variant", default="2b-test", choices=PALIGEMMA_VARIANTS, type=str, help="String identifier of the paligemma variant to convert.", ) parser.add_argument( "--do_convert_weights", action="store_true", help="Whether or not to reload and convert the weights." ) args = parser.parse_args() convert_paligemma_checkpoint( checkpoint_path=args.checkpoint_path, tokenizer_model_file=args.tokenizer_model_file, pytorch_dump_folder_path=args.pytorch_dump_folder_path, variant=args.variant, precision=args.precision, do_convert_weights=args.do_convert_weights, )
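As a sanity check on the key renaming above, the sketch below duplicates the logic of flatten_nested_dict on a tiny hand-made tree. The toy keys are made up for illustration, but they mirror the "img/..." and "llm/..." prefixes that slice_state_dict pops:

# Self-contained sketch of the key flattening; the toy tree is hypothetical.
def _flatten(params, parent_key="", sep="/"):
    items = []
    for k, v in params.items():
        k = k.removeprefix("params/")  # strip the flax top-level prefix, as above
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, dict):
            items.extend(_flatten(v, parent_key=new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)

toy = {
    "params/img": {"embedding": {"kernel": 0, "bias": 1}},
    "params/llm": {"final_norm": {"scale": 2}},
}
assert _flatten(toy) == {
    "img/embedding/kernel": 0,
    "img/embedding/bias": 1,
    "llm/final_norm/scale": 2,
}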
transformers/src/transformers/models/paligemma/convert_paligemma_weights_to_hf.py
# coding=utf-8 # Copyright 2020 Google and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization class for model PEGASUS.""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: PegasusTokenizer = None logger = logging.get_logger(__name__) SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} class PegasusTokenizerFast(PreTrainedTokenizerFast): r""" Construct a "fast" PEGASUS tokenizer (backed by HuggingFace's *tokenizers* library). Based on [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. mask_token (`str`, *optional*, defaults to `"<mask_2>"`): The token used for masking single token values. This is the token used when training this model with masked language modeling (MLM). This is the token that the PEGASUS encoder will try to predict during pretraining. It corresponds to *[MASK2]* in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/pdf/1912.08777.pdf). mask_token_sent (`str`, *optional*, defaults to `"<mask_1>"`): The token used for masking whole target sentences. This is the token used when training this model with gap sentences generation (GSG). This is the sentence that the PEGASUS decoder will try to predict during pretraining. It corresponds to *[MASK1]* in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/pdf/1912.08777.pdf). additional_special_tokens (`List[str]`, *optional*): Additional special tokens used by the tokenizer. 
    If no additional_special_tokens are provided <mask_2> and <unk_2>, ..., <unk_102> are used as additional
            special tokens corresponding to the [original PEGASUS
            tokenizer](https://github.com/google-research/pegasus/blob/939830367bcf411193d2b5eca2f2f90f3f9260ca/pegasus/ops/pretrain_parsing_ops.cc#L66)
            that uses the tokens 2 - 104 only for pretraining.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,  # entries 2 - 104 are only used for pretraining
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {list}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        # pegasus was designed to support changing the index of the first tokens. If one of the padding/eos/unk/mask
        # tokens is different from the default, we must rebuild the vocab
        from_slow = kwargs.pop("from_slow", None)
        from_slow = from_slow or str(pad_token) != "<pad>" or str(eos_token) != "</s>" or str(unk_token) != "<unk>"

        kwargs.pop("added_tokens_decoder", {})

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            from_slow=from_slow,
            **kwargs,
        )
        self.vocab_file = vocab_file

    @property
    def can_save_slow_tokenizer(self) -> bool:
        return os.path.isfile(self.vocab_file) if self.vocab_file else False

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are 1 if a token is [eos] or [pad], else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """
        Build model inputs from a sequence by adding eos to the end. No bos token is added to the front.

        - single sequence: `X </s>`
        - pair of sequences: `A B </s>` (not intended use)

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
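
# --- Illustrative sketch (added for clarity; not part of the original file) ---
# A minimal, hedged example of how `build_inputs_with_special_tokens` above
# assembles model inputs: a single eos id is appended and no bos is used. The
# token ids below are made up for illustration; `eos_token_id=1` matches the
# usual PEGASUS vocabulary layout but is an assumption here.
def _sketch_build_inputs(token_ids_0, token_ids_1=None, eos_token_id=1):
    # single sequence: X </s>; pair of sequences: A B </s> (pairs are not the intended use)
    if token_ids_1 is None:
        return token_ids_0 + [eos_token_id]
    return token_ids_0 + token_ids_1 + [eos_token_id]


assert _sketch_build_inputs([5, 6, 7]) == [5, 6, 7, 1]
assert _sketch_build_inputs([5, 6], [8, 9]) == [5, 6, 8, 9, 1]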
transformers/src/transformers/models/pegasus/tokenization_pegasus_fast.py/0
# coding=utf-8
# Copyright 2023 Microsoft and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Phi model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class PhiConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`PhiModel`]. It is used to instantiate a Phi
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the Phi
    [microsoft/phi-1](https://huggingface.co/microsoft/phi-1).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 51200):
            Vocabulary size of the Phi model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`PhiModel`].
        hidden_size (`int`, *optional*, defaults to 2048):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 8192):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 24):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise, GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
            constructed by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        resid_pdrop (`float`, *optional*, defaults to 0.0):
            Dropout probability for mlp outputs.
        embd_pdrop (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the embeddings.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio after computing the attention scores.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_new"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Phi-1 and Phi-1.5 support up to 2048
            tokens.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings or not.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format
            is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum. See the following thread for more information on
            how these scaling strategies behave:
            https://www.reddit.com/r/LocalPersimmon/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This
            is an experimental feature, subject to breaking API changes in future versions.
        partial_rotary_factor (`float`, *optional*, defaults to 0.5):
            Percentage of the query and keys which will have rotary embedding.
        qk_layernorm (`bool`, *optional*, defaults to `False`):
            Whether or not to normalize the Queries and Keys after projecting the hidden states.
        bos_token_id (`int`, *optional*, defaults to 1):
            Denotes beginning of sequences token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            Denotes end of sequences token id.

    Example:

    ```python
    >>> from transformers import PhiModel, PhiConfig

    >>> # Initializing a Phi-1 style configuration
    >>> configuration = PhiConfig.from_pretrained("microsoft/phi-1")

    >>> # Initializing a model from the configuration
    >>> model = PhiModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "phi"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=51200,
        hidden_size=2048,
        intermediate_size=8192,
        num_hidden_layers=24,
        num_attention_heads=32,
        num_key_value_heads=None,
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attention_dropout=0.0,
        hidden_act="gelu_new",
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        partial_rotary_factor=0.5,
        qk_layernorm=False,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attention_dropout = attention_dropout
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.partial_rotary_factor = partial_rotary_factor
        self.qk_layernorm = qk_layernorm
        self._rope_scaling_validation()

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
""" if self.rope_scaling is None: return if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2: raise ValueError( "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}" ) rope_scaling_type = self.rope_scaling.get("type", None) rope_scaling_factor = self.rope_scaling.get("factor", None) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" ) if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
transformers/src/transformers/models/phi/configuration_phi.py/0
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import torch from torch import nn from transformers import PLBartConfig, PLBartForConditionalGeneration, PLBartForSequenceClassification def remove_ignore_keys_(state_dict): ignore_keys = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", "decoder.output_projection.weight", ] for k in ignore_keys: state_dict.pop(k, None) def make_linear_from_emb(emb): vocab_size, emb_size = emb.weight.shape lin_layer = nn.Linear(vocab_size, emb_size, bias=False) lin_layer.weight.data = emb.weight.data return lin_layer def convert_fairseq_plbart_checkpoint_from_disk( checkpoint_path, hf_config_path="uclanlp/plbart-base", finetuned=False, classification=False ): state_dict = torch.load(checkpoint_path, map_location="cpu")["model"] remove_ignore_keys_(state_dict) vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0] plbart_config = PLBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size) state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"] if not classification: model = PLBartForConditionalGeneration(plbart_config) model.model.load_state_dict(state_dict) if finetuned: model.lm_head = make_linear_from_emb(model.model.shared) else: classification_head = {} for key, value in state_dict.copy().items(): if key.startswith("classification_heads.sentence_classification_head"): classification_head[key.replace("classification_heads.sentence_classification_head.", "")] = value state_dict.pop(key) model = PLBartForSequenceClassification(plbart_config) model.model.load_state_dict(state_dict) model.classification_head.load_state_dict(classification_head) return model if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument("fairseq_path", type=str, help="model.pt on local filesystem.") parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--hf_config", default="uclanlp/plbart-base", type=str, help="Which huggingface architecture to use: plbart-base", ) parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint") parser.add_argument( "--classification", action="store_true", help="whether the model is a classification checkpoint" ) args = parser.parse_args() model = convert_fairseq_plbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, classification=args.classification, ) model.save_pretrained(args.pytorch_dump_folder_path)
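
# --- Illustrative sketch (added for clarity; not part of the original file) ---
# What `make_linear_from_emb` above relies on: overwriting `weight.data` makes
# the layer compute scores over the vocabulary, because `nn.Linear` applies
# `x @ W.T` regardless of the (in, out) sizes passed to the constructor. The toy
# sizes below are made up for illustration.
import torch
from torch import nn

emb = nn.Embedding(10, 4)                # vocab_size=10, emb_size=4
lm_head = nn.Linear(10, 4, bias=False)   # declared dims are immediately overwritten below
lm_head.weight.data = emb.weight.data    # weight now has shape (10, 4)

hidden = torch.randn(2, 4)               # (batch, emb_size)
logits = lm_head(hidden)                 # F.linear computes hidden @ weight.T
print(logits.shape)                      # torch.Size([2, 10]): one score per vocab entry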
transformers/src/transformers/models/plbart/convert_plbart_original_checkpoint_to_torch.py/0
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = { "configuration_prophetnet": ["ProphetNetConfig"], "tokenization_prophetnet": ["ProphetNetTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_prophetnet"] = [ "ProphetNetDecoder", "ProphetNetEncoder", "ProphetNetForCausalLM", "ProphetNetForConditionalGeneration", "ProphetNetModel", "ProphetNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_prophetnet import ProphetNetConfig from .tokenization_prophetnet import ProphetNetTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_prophetnet import ( ProphetNetDecoder, ProphetNetEncoder, ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel, ProphetNetPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
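
# --- Illustrative sketch (added for clarity; not part of the original file) ---
# What the `_LazyModule` wiring above buys: importing the package only registers
# names, and each submodule is loaded on first attribute access. Assumes
# `transformers` is installed; no torch weights are needed for the config.
from transformers.models import prophetnet

config = prophetnet.ProphetNetConfig()  # loads configuration_prophetnet on demand
print(type(config).__name__)            # "ProphetNetConfig"
# Accessing a model class would lazily import modeling_prophetnet (and require torch):
# model = prophetnet.ProphetNetModel(config)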
transformers/src/transformers/models/prophetnet/__init__.py/0
# coding=utf-8
# Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""RAG model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings


RAG_CONFIG_DOC = r"""
    [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
    can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        title_sep (`str`, *optional*, defaults to `" / "`):
            Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
        doc_sep (`str`, *optional*, defaults to `" // "`):
            Separator inserted between the text of the retrieved document and the original input when calling
            [`RagRetriever`].
        n_docs (`int`, *optional*, defaults to 5):
            Number of documents to retrieve.
        max_combined_length (`int`, *optional*, defaults to 300):
            Max length of contextualized input returned by [`~RagRetriever.__call__`].
        retrieval_vector_size (`int`, *optional*, defaults to 768):
            Dimensionality of the document embeddings indexed by [`RagRetriever`].
        retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated
            by [`RagRetriever`].
        dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
            A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
            using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `"train"`):
            Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `"compressed"`):
            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
            `"compressed"`.
        index_path (`str`, *optional*):
            The path to the serialized faiss index on disk.
        passages_path (`str`, *optional*):
            A path to text passages compatible with the faiss index. Required if using
            [`~models.rag.retrieval_rag.LegacyIndex`]
        use_dummy_dataset (`bool`, *optional*, defaults to `False`):
            Whether to load a "dummy" variant of the dataset specified by `dataset`.
        label_smoothing (`float`, *optional*, defaults to 0.0):
            Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
            in the loss calculation. If set to 0, no label smoothing is performed.
        do_marginalize (`bool`, *optional*, defaults to `False`):
            If `True`, the logits are marginalized over all documents by making use of
            `torch.nn.functional.log_softmax`.
        reduce_loss (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
        do_deduplication (`bool`, *optional*, defaults to `True`):
            Whether or not to deduplicate the generations from different context documents for a given input. Has to be
            set to `False` if used while training with distributed backend.
        exclude_bos_score (`bool`, *optional*, defaults to `False`):
            Whether or not to disregard the BOS token when computing the loss.
        output_retrieved (`bool`, *optional*, defaults to `False`):
            If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
            `context_attention_mask` are returned. See returned tensors for more detail.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.
"""


@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        dataset_revision=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )

        if "question_encoder" not in kwargs or "generator" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"both `question_encoder` and `generator` sub-configurations were not passed, only {kwargs}"
            )

        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.dataset_revision = dataset_revision

        self.output_retrieved = output_retrieved

        self.do_deduplication = do_deduplication

        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        r"""
        Instantiate a [`RagConfig`] (or a derived class) from a pre-trained question encoder model configuration and
        generator model configuration.

        Returns:
            [`RagConfig`]: An instance of a configuration object
        """
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)
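
# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Hedged example of `from_question_encoder_generator_configs` above: composing a
# RagConfig from a DPR question-encoder config and a BART generator config. The
# choice of sub-configs here is illustrative; any registered config types work.
from transformers import BartConfig, DPRConfig, RagConfig

rag_config = RagConfig.from_question_encoder_generator_configs(
    DPRConfig(), BartConfig(), n_docs=5, index_name="exact"
)
print(rag_config.question_encoder.model_type)  # "dpr"
print(rag_config.generator.model_type)         # "bart"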
transformers/src/transformers/models/rag/configuration_rag.py/0
# coding=utf-8
# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""RegNet model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class RegNetConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`RegNetModel`]. It is used to instantiate a
    RegNet model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the RegNet
    [facebook/regnet-y-040](https://huggingface.co/facebook/regnet-y-040) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        embedding_size (`int`, *optional*, defaults to 32):
            Dimensionality (hidden size) for the embedding layer.
        hidden_sizes (`List[int]`, *optional*, defaults to `[128, 192, 512, 1088]`):
            Dimensionality (hidden size) at each stage.
        depths (`List[int]`, *optional*, defaults to `[2, 6, 12, 2]`):
            Depth (number of layers) for each stage.
        groups_width (`int`, *optional*, defaults to 64):
            The number of channels per group in the grouped convolutions.
        layer_type (`str`, *optional*, defaults to `"y"`):
            The layer to use, it can be either `"x"` or `"y"`. An `x` layer is a ResNet's BottleNeck layer with
            `reduction` fixed to `1`. While a `y` layer is an `x` layer with squeeze and excitation. Please refer to
            the paper for a detailed explanation of how these layers were constructed.
        hidden_act (`str`, *optional*, defaults to `"relu"`):
            The non-linear activation function in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"`
            are supported.
        downsample_in_first_stage (`bool`, *optional*, defaults to `True`):
            If `True`, the first stage will downsample the inputs using a `stride` of 2. Always set to `True` in this
            implementation.

    Example:
    ```python
    >>> from transformers import RegNetConfig, RegNetModel

    >>> # Initializing a RegNet regnet-y-40 style configuration
    >>> configuration = RegNetConfig()

    >>> # Initializing a model from the regnet-y-40 style configuration
    >>> model = RegNetModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    model_type = "regnet"
    layer_types = ["x", "y"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=32,
        hidden_sizes=[128, 192, 512, 1088],
        depths=[2, 6, 12, 2],
        groups_width=64,
        layer_type="y",
        hidden_act="relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.groups_width = groups_width
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        # always downsample in the first stage
        self.downsample_in_first_stage = True
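
# --- Illustrative sketch (added for clarity; not part of the original file) ---
# The `layer_type` guard above in action: only "x" (plain bottleneck) and "y"
# (bottleneck + squeeze-and-excitation) are accepted. Assumes `transformers` is
# installed.
from transformers import RegNetConfig

config = RegNetConfig(layer_type="x")  # fine
print(config.hidden_sizes)             # [128, 192, 512, 1088]

try:
    RegNetConfig(layer_type="z")       # rejected by the validation above
except ValueError as err:
    print(err)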
transformers/src/transformers/models/regnet/configuration_regnet.py/0
# coding=utf-8 # Copyright 2023 HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import partial from typing import Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.traverse_util import flatten_dict, unflatten_dict from ...modeling_flax_outputs import ( FlaxBaseModelOutputWithNoAttention, FlaxBaseModelOutputWithPoolingAndNoAttention, FlaxImageClassifierOutputWithNoAttention, ) from ...modeling_flax_utils import ( ACT2FN, FlaxPreTrainedModel, append_replace_return_docstrings, overwrite_call_docstring, ) from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward from .configuration_resnet import ResNetConfig RESNET_START_DOCSTRING = r""" This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models) This model is also a [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. """ RESNET_INPUTS_DOCSTRING = r""" Args: pixel_values (`jax.numpy.float32` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`AutoImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class Identity(nn.Module): """Identity function.""" @nn.compact def __call__(self, x, **kwargs): return x class FlaxResNetConvLayer(nn.Module): out_channels: int kernel_size: int = 3 stride: int = 1 activation: Optional[str] = "relu" dtype: jnp.dtype = jnp.float32 def setup(self): self.convolution = nn.Conv( self.out_channels, kernel_size=(self.kernel_size, self.kernel_size), strides=self.stride, padding=self.kernel_size // 2, dtype=self.dtype, use_bias=False, kernel_init=nn.initializers.variance_scaling(2.0, mode="fan_out", distribution="normal", dtype=self.dtype), ) self.normalization = nn.BatchNorm(momentum=0.9, epsilon=1e-05, dtype=self.dtype) self.activation_func = ACT2FN[self.activation] if self.activation is not None else Identity() def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: hidden_state = self.convolution(x) hidden_state = self.normalization(hidden_state, use_running_average=deterministic) hidden_state = self.activation_func(hidden_state) return hidden_state class FlaxResNetEmbeddings(nn.Module): """ ResNet Embeddings (stem) composed of a single aggressive convolution. """ config: ResNetConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.embedder = FlaxResNetConvLayer( self.config.embedding_size, kernel_size=7, stride=2, activation=self.config.hidden_act, dtype=self.dtype, ) self.max_pool = partial(nn.max_pool, window_shape=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1))) def __call__(self, pixel_values: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: num_channels = pixel_values.shape[-1] if num_channels != self.config.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) embedding = self.embedder(pixel_values, deterministic=deterministic) embedding = self.max_pool(embedding) return embedding class FlaxResNetShortCut(nn.Module): """ ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to downsample the input using `stride=2`. """ out_channels: int stride: int = 2 dtype: jnp.dtype = jnp.float32 def setup(self): self.convolution = nn.Conv( self.out_channels, kernel_size=(1, 1), strides=self.stride, use_bias=False, kernel_init=nn.initializers.variance_scaling(2.0, mode="fan_out", distribution="truncated_normal"), dtype=self.dtype, ) self.normalization = nn.BatchNorm(momentum=0.9, epsilon=1e-05, dtype=self.dtype) def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: hidden_state = self.convolution(x) hidden_state = self.normalization(hidden_state, use_running_average=deterministic) return hidden_state class FlaxResNetBasicLayerCollection(nn.Module): out_channels: int stride: int = 1 dtype: jnp.dtype = jnp.float32 def setup(self): self.layer = [ FlaxResNetConvLayer(self.out_channels, stride=self.stride, dtype=self.dtype), FlaxResNetConvLayer(self.out_channels, activation=None, dtype=self.dtype), ] def __call__(self, hidden_state: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: for layer in self.layer: hidden_state = layer(hidden_state, deterministic=deterministic) return hidden_state class FlaxResNetBasicLayer(nn.Module): """ A classic ResNet's residual layer composed by two `3x3` convolutions. 
""" in_channels: int out_channels: int stride: int = 1 activation: Optional[str] = "relu" dtype: jnp.dtype = jnp.float32 def setup(self): should_apply_shortcut = self.in_channels != self.out_channels or self.stride != 1 self.shortcut = ( FlaxResNetShortCut(self.out_channels, stride=self.stride, dtype=self.dtype) if should_apply_shortcut else None ) self.layer = FlaxResNetBasicLayerCollection( out_channels=self.out_channels, stride=self.stride, dtype=self.dtype, ) self.activation_func = ACT2FN[self.activation] def __call__(self, hidden_state, deterministic: bool = True): residual = hidden_state hidden_state = self.layer(hidden_state, deterministic=deterministic) if self.shortcut is not None: residual = self.shortcut(residual, deterministic=deterministic) hidden_state += residual hidden_state = self.activation_func(hidden_state) return hidden_state class FlaxResNetBottleNeckLayerCollection(nn.Module): out_channels: int stride: int = 1 activation: Optional[str] = "relu" reduction: int = 4 dtype: jnp.dtype = jnp.float32 def setup(self): reduces_channels = self.out_channels // self.reduction self.layer = [ FlaxResNetConvLayer(reduces_channels, kernel_size=1, dtype=self.dtype, name="0"), FlaxResNetConvLayer(reduces_channels, stride=self.stride, dtype=self.dtype, name="1"), FlaxResNetConvLayer(self.out_channels, kernel_size=1, activation=None, dtype=self.dtype, name="2"), ] def __call__(self, hidden_state: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: for layer in self.layer: hidden_state = layer(hidden_state, deterministic=deterministic) return hidden_state class FlaxResNetBottleNeckLayer(nn.Module): """ A classic ResNet's bottleneck layer composed by three `3x3` convolutions. The first `1x1` convolution reduces the input by a factor of `reduction` in order to make the second `3x3` convolution faster. The last `1x1` convolution remaps the reduced features to `out_channels`. """ in_channels: int out_channels: int stride: int = 1 activation: Optional[str] = "relu" reduction: int = 4 dtype: jnp.dtype = jnp.float32 def setup(self): should_apply_shortcut = self.in_channels != self.out_channels or self.stride != 1 self.shortcut = ( FlaxResNetShortCut(self.out_channels, stride=self.stride, dtype=self.dtype) if should_apply_shortcut else None ) self.layer = FlaxResNetBottleNeckLayerCollection( self.out_channels, stride=self.stride, activation=self.activation, reduction=self.reduction, dtype=self.dtype, ) self.activation_func = ACT2FN[self.activation] def __call__(self, hidden_state: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: residual = hidden_state if self.shortcut is not None: residual = self.shortcut(residual, deterministic=deterministic) hidden_state = self.layer(hidden_state, deterministic) hidden_state += residual hidden_state = self.activation_func(hidden_state) return hidden_state class FlaxResNetStageLayersCollection(nn.Module): """ A ResNet stage composed by stacked layers. 
""" config: ResNetConfig in_channels: int out_channels: int stride: int = 2 depth: int = 2 dtype: jnp.dtype = jnp.float32 def setup(self): layer = FlaxResNetBottleNeckLayer if self.config.layer_type == "bottleneck" else FlaxResNetBasicLayer layers = [ # downsampling is done in the first layer with stride of 2 layer( self.in_channels, self.out_channels, stride=self.stride, activation=self.config.hidden_act, dtype=self.dtype, name="0", ), ] for i in range(self.depth - 1): layers.append( layer( self.out_channels, self.out_channels, activation=self.config.hidden_act, dtype=self.dtype, name=str(i + 1), ) ) self.layers = layers def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: hidden_state = x for layer in self.layers: hidden_state = layer(hidden_state, deterministic=deterministic) return hidden_state class FlaxResNetStage(nn.Module): """ A ResNet stage composed by stacked layers. """ config: ResNetConfig in_channels: int out_channels: int stride: int = 2 depth: int = 2 dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = FlaxResNetStageLayersCollection( self.config, in_channels=self.in_channels, out_channels=self.out_channels, stride=self.stride, depth=self.depth, dtype=self.dtype, ) def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: return self.layers(x, deterministic=deterministic) class FlaxResNetStageCollection(nn.Module): config: ResNetConfig dtype: jnp.dtype = jnp.float32 def setup(self): in_out_channels = zip(self.config.hidden_sizes, self.config.hidden_sizes[1:]) stages = [ FlaxResNetStage( self.config, self.config.embedding_size, self.config.hidden_sizes[0], stride=2 if self.config.downsample_in_first_stage else 1, depth=self.config.depths[0], dtype=self.dtype, name="0", ) ] for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, self.config.depths[1:])): stages.append( FlaxResNetStage(self.config, in_channels, out_channels, depth=depth, dtype=self.dtype, name=str(i + 1)) ) self.stages = stages def __call__( self, hidden_state: jnp.ndarray, output_hidden_states: bool = False, deterministic: bool = True, ) -> FlaxBaseModelOutputWithNoAttention: hidden_states = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: hidden_states = hidden_states + (hidden_state.transpose(0, 3, 1, 2),) hidden_state = stage_module(hidden_state, deterministic=deterministic) return hidden_state, hidden_states class FlaxResNetEncoder(nn.Module): config: ResNetConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.stages = FlaxResNetStageCollection(self.config, dtype=self.dtype) def __call__( self, hidden_state: jnp.ndarray, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ) -> FlaxBaseModelOutputWithNoAttention: hidden_state, hidden_states = self.stages( hidden_state, output_hidden_states=output_hidden_states, deterministic=deterministic ) if output_hidden_states: hidden_states = hidden_states + (hidden_state.transpose(0, 3, 1, 2),) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None) return FlaxBaseModelOutputWithNoAttention( last_hidden_state=hidden_state, hidden_states=hidden_states, ) class FlaxResNetPreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = ResNetConfig base_model_prefix = "resnet" main_input_name = "pixel_values" module_class: nn.Module = None def __init__( self, config: ResNetConfig, input_shape=(1, 224, 224, 3), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): module = self.module_class(config=config, dtype=dtype, **kwargs) if input_shape is None: input_shape = (1, config.image_size, config.image_size, config.num_channels) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors pixel_values = jnp.zeros(input_shape, dtype=self.dtype) rngs = {"params": rng} random_params = self.module.init(rngs, pixel_values, return_dict=False) if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING) def __call__( self, pixel_values, params: dict = None, train: bool = False, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1)) # Handle any PRNG if needed rngs = {} return self.module.apply( { "params": params["params"] if params is not None else self.params["params"], "batch_stats": params["batch_stats"] if params is not None else self.params["batch_stats"], }, jnp.array(pixel_values, dtype=jnp.float32), not train, output_hidden_states, return_dict, rngs=rngs, mutable=["batch_stats"] if train else False, # Returing tuple with batch_stats only when train is True ) class FlaxResNetModule(nn.Module): config: ResNetConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.embedder = FlaxResNetEmbeddings(self.config, dtype=self.dtype) self.encoder = FlaxResNetEncoder(self.config, dtype=self.dtype) # Adaptive average pooling used in resnet self.pooler = partial( nn.avg_pool, padding=((0, 0), (0, 0)), ) def __call__( self, pixel_values, deterministic: bool = True, output_hidden_states: bool = False, return_dict: bool = True, ) -> FlaxBaseModelOutputWithPoolingAndNoAttention: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict embedding_output = self.embedder(pixel_values, deterministic=deterministic) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) last_hidden_state = encoder_outputs[0] pooled_output = self.pooler( last_hidden_state, window_shape=(last_hidden_state.shape[1], last_hidden_state.shape[2]), strides=(last_hidden_state.shape[1], last_hidden_state.shape[2]), ).transpose(0, 3, 1, 2) last_hidden_state = last_hidden_state.transpose(0, 3, 1, 2) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return FlaxBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, 
hidden_states=encoder_outputs.hidden_states, ) @add_start_docstrings( "The bare ResNet model outputting raw features without any specific head on top.", RESNET_START_DOCSTRING, ) class FlaxResNetModel(FlaxResNetPreTrainedModel): module_class = FlaxResNetModule FLAX_VISION_MODEL_DOCSTRING = """ Returns: Examples: ```python >>> from transformers import AutoImageProcessor, FlaxResNetModel >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50") >>> model = FlaxResNetModel.from_pretrained("microsoft/resnet-50") >>> inputs = image_processor(images=image, return_tensors="np") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ``` """ overwrite_call_docstring(FlaxResNetModel, FLAX_VISION_MODEL_DOCSTRING) append_replace_return_docstrings( FlaxResNetModel, output_type=FlaxBaseModelOutputWithPoolingAndNoAttention, config_class=ResNetConfig ) class FlaxResNetClassifierCollection(nn.Module): config: ResNetConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype, name="1") def __call__(self, x: jnp.ndarray) -> jnp.ndarray: return self.classifier(x) class FlaxResNetForImageClassificationModule(nn.Module): config: ResNetConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.resnet = FlaxResNetModule(config=self.config, dtype=self.dtype) if self.config.num_labels > 0: self.classifier = FlaxResNetClassifierCollection(self.config, dtype=self.dtype) else: self.classifier = Identity() def __call__( self, pixel_values=None, deterministic: bool = True, output_hidden_states=None, return_dict=None, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.resnet( pixel_values, deterministic=deterministic, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(pooled_output[:, :, 0, 0]) if not return_dict: output = (logits,) + outputs[2:] return output return FlaxImageClassifierOutputWithNoAttention(logits=logits, hidden_states=outputs.hidden_states) @add_start_docstrings( """ ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
""", RESNET_START_DOCSTRING, ) class FlaxResNetForImageClassification(FlaxResNetPreTrainedModel): module_class = FlaxResNetForImageClassificationModule FLAX_VISION_CLASSIF_DOCSTRING = """ Returns: Example: ```python >>> from transformers import AutoImageProcessor, FlaxResNetForImageClassification >>> from PIL import Image >>> import jax >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50") >>> model = FlaxResNetForImageClassification.from_pretrained("microsoft/resnet-50") >>> inputs = image_processor(images=image, return_tensors="np") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> # model predicts one of the 1000 ImageNet classes >>> predicted_class_idx = jax.numpy.argmax(logits, axis=-1) >>> print("Predicted class:", model.config.id2label[predicted_class_idx.item()]) ``` """ overwrite_call_docstring(FlaxResNetForImageClassification, FLAX_VISION_CLASSIF_DOCSTRING) append_replace_return_docstrings( FlaxResNetForImageClassification, output_type=FlaxImageClassifierOutputWithNoAttention, config_class=ResNetConfig )
transformers/src/transformers/models/resnet/modeling_flax_resnet.py/0
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _import_structure = { "configuration_seamless_m4t": ["SeamlessM4TConfig"], "feature_extraction_seamless_m4t": ["SeamlessM4TFeatureExtractor"], "processing_seamless_m4t": ["SeamlessM4TProcessor"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_seamless_m4t"] = ["SeamlessM4TTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_seamless_m4t_fast"] = ["SeamlessM4TTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_seamless_m4t"] = [ "SeamlessM4TForTextToSpeech", "SeamlessM4TForSpeechToSpeech", "SeamlessM4TForTextToText", "SeamlessM4TForSpeechToText", "SeamlessM4TModel", "SeamlessM4TPreTrainedModel", "SeamlessM4TCodeHifiGan", "SeamlessM4THifiGan", "SeamlessM4TTextToUnitForConditionalGeneration", "SeamlessM4TTextToUnitModel", ] if TYPE_CHECKING: from .configuration_seamless_m4t import SeamlessM4TConfig from .feature_extraction_seamless_m4t import SeamlessM4TFeatureExtractor from .processing_seamless_m4t import SeamlessM4TProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_seamless_m4t import SeamlessM4TTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_seamless_m4t_fast import SeamlessM4TTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_seamless_m4t import ( SeamlessM4TCodeHifiGan, SeamlessM4TForSpeechToSpeech, SeamlessM4TForSpeechToText, SeamlessM4TForTextToSpeech, SeamlessM4TForTextToText, SeamlessM4THifiGan, SeamlessM4TModel, SeamlessM4TPreTrainedModel, SeamlessM4TTextToUnitForConditionalGeneration, SeamlessM4TTextToUnitModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
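
# --- Illustrative sketch (added for clarity; not part of the original file) ---
# The availability checks used above, from the caller's side: probe for the
# optional backends before importing the classes that need them.
from transformers.utils import is_sentencepiece_available, is_tokenizers_available, is_torch_available

if is_sentencepiece_available():
    from transformers import SeamlessM4TTokenizer       # slow tokenizer needs sentencepiece
if is_tokenizers_available():
    from transformers import SeamlessM4TTokenizerFast   # fast tokenizer needs tokenizers
if is_torch_available():
    from transformers import SeamlessM4TModel           # modeling classes need torch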
transformers/src/transformers/models/seamless_m4t/__init__.py/0
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Image processor class for Segformer."""

from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import INIT_SERVICE_KWARGS, BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    infer_channel_dimension_format,
    is_scaled_image,
    make_list_of_images,
    to_numpy_array,
    valid_images,
    validate_preprocess_arguments,
)
from ...utils import (
    TensorType,
    filter_out_non_signature_kwargs,
    is_torch_available,
    is_torch_tensor,
    is_vision_available,
    logging,
)
from ...utils.deprecation import deprecate_kwarg


if is_vision_available():
    import PIL.Image

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class SegformerImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Segformer image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `(size["height"],
            size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method.
        size (`Dict[str, int]`, *optional*, defaults to `{"height": 512, "width": 512}`):
            Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
            method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
            Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
            `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
            `do_rescale` parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Factor used to rescale the image pixel values. Can be overridden by the `rescale_factor` parameter in the
            `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess`
            method.
        do_reduce_labels (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce all label values of segmentation maps by 1.
Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Can be overridden by the `do_reduce_labels` parameter in the `preprocess` method. """ model_input_names = ["pixel_values"] @deprecate_kwarg("reduce_labels", new_name="do_reduce_labels", version="4.41.0") @filter_out_non_signature_kwargs(extra=INIT_SERVICE_KWARGS) def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_reduce_labels: bool = False, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 512, "width": 512} size = get_size_dict(size) self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.do_reduce_labels = do_reduce_labels @classmethod def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs): """ Overrides the `from_dict` method from the base class to save support of deprecated `reduce_labels` in old configs """ image_processor_dict = image_processor_dict.copy() if "reduce_labels" in image_processor_dict: image_processor_dict["do_reduce_labels"] = image_processor_dict.pop("reduce_labels") return super().from_dict(image_processor_dict, **kwargs) # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized image. 
""" size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}") output_size = (size["height"], size["width"]) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) # Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.reduce_label def reduce_label(self, label: ImageInput) -> np.ndarray: label = to_numpy_array(label) # Avoid using underflow conversion label[label == 0] = 255 label = label - 1 label[label == 254] = 255 return label def _preprocess( self, image: ImageInput, do_reduce_labels: bool, do_resize: bool, do_rescale: bool, do_normalize: bool, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, rescale_factor: Optional[float] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): if do_reduce_labels: image = self.reduce_label(image) if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) return image def _preprocess_image( self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """Preprocesses a single image.""" # All transformations expect numpy arrays. image = to_numpy_array(image) if is_scaled_image(image) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: input_data_format = infer_channel_dimension_format(image) image = self._preprocess( image=image, do_reduce_labels=False, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, input_data_format=input_data_format, ) if data_format is not None: image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image def _preprocess_mask( self, segmentation_map: ImageInput, do_reduce_labels: bool = None, do_resize: bool = None, size: Dict[str, int] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """Preprocesses a single mask.""" segmentation_map = to_numpy_array(segmentation_map) # Add channel dimension if missing - needed for certain transformations if segmentation_map.ndim == 2: added_channel_dim = True segmentation_map = segmentation_map[None, ...] 
            input_data_format = ChannelDimension.FIRST
        else:
            added_channel_dim = False
            if input_data_format is None:
                input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)

        # reduce zero label if needed
        segmentation_map = self._preprocess(
            image=segmentation_map,
            do_reduce_labels=do_reduce_labels,
            do_resize=do_resize,
            resample=PILImageResampling.NEAREST,
            size=size,
            do_rescale=False,
            do_normalize=False,
            input_data_format=input_data_format,
        )
        # Remove extra channel dimension if added for processing
        if added_channel_dim:
            segmentation_map = segmentation_map.squeeze(0)
        segmentation_map = segmentation_map.astype(np.int64)
        return segmentation_map

    def __call__(self, images, segmentation_maps=None, **kwargs):
        """
        Preprocesses a batch of images and optionally segmentation maps.

        Overrides the `__call__` method of the `Preprocessor` class so that both images and segmentation maps can be
        passed in as positional arguments.
        """
        return super().__call__(images, segmentation_maps=segmentation_maps, **kwargs)

    @deprecate_kwarg("reduce_labels", new_name="do_reduce_labels", version="4.41.0")
    @filter_out_non_signature_kwargs()
    def preprocess(
        self,
        images: ImageInput,
        segmentation_maps: Optional[ImageInput] = None,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_reduce_labels: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> BatchFeature:
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            segmentation_maps (`ImageInput`, *optional*):
                Segmentation map to preprocess.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after `resize` is applied.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`.
                Only has an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values to [0, 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation.
            do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
                Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
                is used for background, and background itself is not included in all classes of a dataset (e.g.
                ADE20k). The background label will be replaced by 255.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return.
Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize do_rescale = do_rescale if do_rescale is not None else self.do_rescale do_normalize = do_normalize if do_normalize is not None else self.do_normalize do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels resample = resample if resample is not None else self.resample size = size if size is not None else self.size rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std images = make_list_of_images(images) if segmentation_maps is not None: segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample, ) images = [ self._preprocess_image( image=img, do_resize=do_resize, resample=resample, size=size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format, ) for img in images ] data = {"pixel_values": images} if segmentation_maps is not None: segmentation_maps = [ self._preprocess_mask( segmentation_map=segmentation_map, do_reduce_labels=do_reduce_labels, do_resize=do_resize, size=size, input_data_format=input_data_format, ) for segmentation_map in segmentation_maps ] data["labels"] = segmentation_maps return BatchFeature(data=data, tensor_type=return_tensors) # Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.post_process_semantic_segmentation with Beit->Segformer def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None): """ Converts the output of [`SegformerForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`SegformerForSemanticSegmentation`]): Raw outputs of the model. 
            target_sizes (`List[Tuple]` of length `batch_size`, *optional*):
                List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
                predictions will not be resized.

        Returns:
            semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic
            segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
            specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
        """
        # TODO: add support for other frameworks
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
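A minimal usage sketch for the processor above (illustrative, not part of the library source). It assumes `transformers` and `torch` are installed; the image and segmentation map are synthetic stand-ins.

# Sketch: preprocess an image/mask pair with the processor defined above.
import numpy as np

from transformers import SegformerImageProcessor

processor = SegformerImageProcessor(do_reduce_labels=True)

# Dummy RGB image and dummy segmentation map (0 = background).
image = np.random.randint(0, 256, (512, 512, 3), dtype=np.uint8)
segmentation_map = np.random.randint(0, 10, (512, 512), dtype=np.uint8)

inputs = processor(images=image, segmentation_maps=segmentation_map, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 512, 512])
print(inputs["labels"].shape)        # torch.Size([1, 512, 512]); label 0 becomes 255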
transformers/src/transformers/models/segformer/image_processing_segformer.py/0
{ "file_path": "transformers/src/transformers/models/segformer/image_processing_segformer.py", "repo_id": "transformers", "token_count": 9854 }
393
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, is_vision_available, ) _import_structure = { "configuration_siglip": [ "SiglipConfig", "SiglipTextConfig", "SiglipVisionConfig", ], "processing_siglip": ["SiglipProcessor"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_siglip"] = ["SiglipTokenizer"] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["image_processing_siglip"] = ["SiglipImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_siglip"] = [ "SiglipModel", "SiglipPreTrainedModel", "SiglipTextModel", "SiglipVisionModel", "SiglipForImageClassification", ] if TYPE_CHECKING: from .configuration_siglip import ( SiglipConfig, SiglipTextConfig, SiglipVisionConfig, ) from .processing_siglip import SiglipProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_siglip import SiglipTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_siglip import SiglipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_siglip import ( SiglipForImageClassification, SiglipModel, SiglipPreTrainedModel, SiglipTextModel, SiglipVisionModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
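An illustrative sketch of how the lazy-import structure above behaves (not part of the file itself). It assumes `transformers` with `torch` installed; the printed hidden sizes are the library defaults.

# Sketch: configs resolve cheaply; model classes import `modeling_siglip`
# (and hence torch) only when first accessed, per `_import_structure` above.
from transformers import SiglipConfig

config = SiglipConfig()  # builds default text and vision sub-configs
print(config.text_config.hidden_size, config.vision_config.hidden_size)

from transformers import SiglipModel  # goes through the torch-gated branch

model = SiglipModel(config)  # randomly initialized model from the config
print(type(model).__name__)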
transformers/src/transformers/models/siglip/__init__.py/0
{ "file_path": "transformers/src/transformers/models/siglip/__init__.py", "repo_id": "transformers", "token_count": 1125 }
394
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Feature extractor class for Speech2Text
"""

from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, is_speech_available, logging


if is_speech_available():
    import torch
    import torchaudio.compliance.kaldi as ta_kaldi


logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a Speech2Text feature extractor.

    This feature extractor inherits from [`SequenceFeatureExtractor`] which contains most of the main methods. Users
    should refer to this superclass for more information regarding those methods.

    This class extracts mel-filter bank features from raw speech using TorchAudio if installed or using numpy
    otherwise, and applies utterance-level cepstral mean and variance normalization to the extracted features.

    Args:
        feature_size (`int`, *optional*, defaults to 80):
            The feature dimension of the extracted features.
        sampling_rate (`int`, *optional*, defaults to 16000):
            The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
        num_mel_bins (`int`, *optional*, defaults to 80):
            Number of Mel-frequency bins.
        padding_value (`float`, *optional*, defaults to 0.0):
            The value that is used to fill the padding vectors.
        do_ceptral_normalize (`bool`, *optional*, defaults to `True`):
            Whether or not to apply utterance-level cepstral mean and variance normalization to extracted features
            (the parameter name keeps the original `ceptral` spelling, as it is part of the public API).
        normalize_means (`bool`, *optional*, defaults to `True`):
            Whether or not to zero-mean normalize the extracted features.
        normalize_vars (`bool`, *optional*, defaults to `True`):
            Whether or not to unit-variance normalize the extracted features.
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

        if not is_speech_available():
            mel_filters = mel_filter_bank(
                num_frequency_bins=256,
                num_mel_filters=self.num_mel_bins,
                min_frequency=20,
                max_frequency=sampling_rate // 2,
                sampling_rate=sampling_rate,
                norm=None,
                mel_scale="kaldi",
                triangularize_in_mel_space=True,
            )

            self.mel_filters = np.pad(mel_filters, ((0, 1), (0, 0)))
            self.window = window_function(400, "povey", periodic=False)

    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        """
        Get mel-filter bank features using TorchAudio.
        Note that TorchAudio requires 16-bit signed integers as inputs and hence the waveform should not be
        normalized before feature extraction.
        """
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        if is_speech_available():
            waveform = torch.from_numpy(waveform).unsqueeze(0)
            features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
            features = features.numpy()
        else:
            waveform = np.squeeze(waveform)
            features = spectrogram(
                waveform,
                self.window,
                frame_length=400,
                hop_length=160,
                fft_length=512,
                power=2.0,
                center=False,
                preemphasis=0.97,
                mel_filters=self.mel_filters,
                log_mel="log",
                mel_floor=1.192092955078125e-07,
                remove_dc_offset=True,
            ).T
        return features

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        """
        Main method to featurize and prepare for the model one or several sequence(s).

        Args:
            raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
                The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
                values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
                stereo, i.e. single float per timestep.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
                Select a strategy to pad the returned sequences (according to the model's padding side and padding
                index) among:

                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence is provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
                  different lengths).
            max_length (`int`, *optional*):
                Maximum length of the returned list and optionally padding length (see above).
            truncation (`bool`):
                Activates truncation to cut input sequences longer than *max_length* to *max_length*.
            pad_to_multiple_of (`int`, *optional*):
                If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific feature_extractor's default. [What are attention masks?](../glossary#attention-mask) <Tip> For Speech2TextTransformer models, `attention_mask` should always be passed for batched inference, to avoid subtle bugs. </Tip> return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. sampling_rate (`int`, *optional*): The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors. padding_value (`float`, *optional*, defaults to 0.0): The value that is used to fill the padding values / vectors. """ if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}") is_batched = is_batched_numpy or ( isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list))) ) if is_batched: raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech] elif not is_batched and not isinstance(raw_speech, np.ndarray): raw_speech = np.asarray(raw_speech, dtype=np.float32) elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64): raw_speech = raw_speech.astype(np.float32) # always return batch if not is_batched: raw_speech = [raw_speech] # extract fbank features features = [self._extract_fbank_features(waveform) for waveform in raw_speech] # convert into correct format for padding encoded_inputs = BatchFeature({"input_features": features}) padded_inputs = self.pad( encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, ) # make sure list is in array format input_features = padded_inputs.get("input_features") if isinstance(input_features[0], list): padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features] attention_mask = padded_inputs.get("attention_mask") if attention_mask is not None: padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: attention_mask = ( np.array(attention_mask, dtype=np.int32) if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD else None ) 
padded_inputs["input_features"] = self.normalize( padded_inputs["input_features"], attention_mask=attention_mask ) if return_tensors is not None: padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs
transformers/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py/0
{ "file_path": "transformers/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py", "repo_id": "transformers", "token_count": 5611 }
395
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import requests import torch from PIL import Image from transformers import SuperPointConfig, SuperPointForKeypointDetection, SuperPointImageProcessor def get_superpoint_config(): config = SuperPointConfig( encoder_hidden_sizes=[64, 64, 128, 128], decoder_hidden_size=256, keypoint_decoder_dim=65, descriptor_decoder_dim=256, keypoint_threshold=0.005, max_keypoints=-1, nms_radius=4, border_removal_distance=4, initializer_range=0.02, ) return config def create_rename_keys(config, state_dict): rename_keys = [] # Encoder weights rename_keys.append(("conv1a.weight", "encoder.conv_blocks.0.conv_a.weight")) rename_keys.append(("conv1b.weight", "encoder.conv_blocks.0.conv_b.weight")) rename_keys.append(("conv2a.weight", "encoder.conv_blocks.1.conv_a.weight")) rename_keys.append(("conv2b.weight", "encoder.conv_blocks.1.conv_b.weight")) rename_keys.append(("conv3a.weight", "encoder.conv_blocks.2.conv_a.weight")) rename_keys.append(("conv3b.weight", "encoder.conv_blocks.2.conv_b.weight")) rename_keys.append(("conv4a.weight", "encoder.conv_blocks.3.conv_a.weight")) rename_keys.append(("conv4b.weight", "encoder.conv_blocks.3.conv_b.weight")) rename_keys.append(("conv1a.bias", "encoder.conv_blocks.0.conv_a.bias")) rename_keys.append(("conv1b.bias", "encoder.conv_blocks.0.conv_b.bias")) rename_keys.append(("conv2a.bias", "encoder.conv_blocks.1.conv_a.bias")) rename_keys.append(("conv2b.bias", "encoder.conv_blocks.1.conv_b.bias")) rename_keys.append(("conv3a.bias", "encoder.conv_blocks.2.conv_a.bias")) rename_keys.append(("conv3b.bias", "encoder.conv_blocks.2.conv_b.bias")) rename_keys.append(("conv4a.bias", "encoder.conv_blocks.3.conv_a.bias")) rename_keys.append(("conv4b.bias", "encoder.conv_blocks.3.conv_b.bias")) # Keypoint Decoder weights rename_keys.append(("convPa.weight", "keypoint_decoder.conv_score_a.weight")) rename_keys.append(("convPb.weight", "keypoint_decoder.conv_score_b.weight")) rename_keys.append(("convPa.bias", "keypoint_decoder.conv_score_a.bias")) rename_keys.append(("convPb.bias", "keypoint_decoder.conv_score_b.bias")) # Descriptor Decoder weights rename_keys.append(("convDa.weight", "descriptor_decoder.conv_descriptor_a.weight")) rename_keys.append(("convDb.weight", "descriptor_decoder.conv_descriptor_b.weight")) rename_keys.append(("convDa.bias", "descriptor_decoder.conv_descriptor_a.bias")) rename_keys.append(("convDb.bias", "descriptor_decoder.conv_descriptor_b.bias")) return rename_keys def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def prepare_imgs(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im1 = Image.open(requests.get(url, stream=True).raw) url = "http://images.cocodataset.org/test-stuff2017/000000004016.jpg" im2 = Image.open(requests.get(url, stream=True).raw) return [im1, im2] @torch.no_grad() def convert_superpoint_checkpoint(checkpoint_url, pytorch_dump_folder_path, save_model, push_to_hub, test_mode=False): """ 
Copy/paste/tweak model's weights to our SuperPoint structure. """ print("Downloading original model from checkpoint...") config = get_superpoint_config() # load original state_dict from URL original_state_dict = torch.hub.load_state_dict_from_url(checkpoint_url) print("Converting model parameters...") # rename keys rename_keys = create_rename_keys(config, original_state_dict) new_state_dict = original_state_dict.copy() for src, dest in rename_keys: rename_key(new_state_dict, src, dest) # Load HuggingFace model model = SuperPointForKeypointDetection(config) model.load_state_dict(new_state_dict) model.eval() print("Successfully loaded weights in the model") # Check model outputs preprocessor = SuperPointImageProcessor() inputs = preprocessor(images=prepare_imgs(), return_tensors="pt") outputs = model(**inputs) # If test_mode is True, we check that the model outputs match the original results if test_mode: torch.count_nonzero(outputs.mask[0]) expected_keypoints_shape = (2, 830, 2) expected_scores_shape = (2, 830) expected_descriptors_shape = (2, 830, 256) expected_keypoints_values = torch.tensor([[480.0, 9.0], [494.0, 9.0], [489.0, 16.0]]) expected_scores_values = torch.tensor([0.0064, 0.0140, 0.0595, 0.0728, 0.5170, 0.0175, 0.1523, 0.2055, 0.0336]) expected_descriptors_value = torch.tensor(-0.1096) assert outputs.keypoints.shape == expected_keypoints_shape assert outputs.scores.shape == expected_scores_shape assert outputs.descriptors.shape == expected_descriptors_shape assert torch.allclose(outputs.keypoints[0, :3], expected_keypoints_values, atol=1e-3) assert torch.allclose(outputs.scores[0, :9], expected_scores_values, atol=1e-3) assert torch.allclose(outputs.descriptors[0, 0, 0], expected_descriptors_value, atol=1e-3) print("Model outputs match the original results!") if save_model: print("Saving model to local...") # Create folder to save model if not os.path.isdir(pytorch_dump_folder_path): os.mkdir(pytorch_dump_folder_path) model.save_pretrained(pytorch_dump_folder_path) preprocessor.save_pretrained(pytorch_dump_folder_path) model_name = "superpoint" if push_to_hub: print(f"Pushing {model_name} to the hub...") model.push_to_hub(model_name) preprocessor.push_to_hub(model_name) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://github.com/magicleap/SuperPointPretrainedNetwork/raw/master/superpoint_v1.pth", type=str, help="URL of the original SuperPoint checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="model", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--save_model", action="store_true", help="Save model to local") parser.add_argument("--push_to_hub", action="store_true", help="Push model and image preprocessor to the hub") args = parser.parse_args() convert_superpoint_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub )
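A standalone sketch of the pop-and-reinsert renaming pattern used by `rename_key` above (toy tensors, illustrative only; the shapes are placeholders, not the real SuperPoint dimensions).

import torch

state_dict = {
    "conv1a.weight": torch.zeros(64, 1, 3, 3),
    "conv1a.bias": torch.zeros(64),
}
rename_map = [
    ("conv1a.weight", "encoder.conv_blocks.0.conv_a.weight"),
    ("conv1a.bias", "encoder.conv_blocks.0.conv_a.bias"),
]

# The tensor values are untouched; only the keys move to the HF module layout.
for old, new in rename_map:
    state_dict[new] = state_dict.pop(old)

print(sorted(state_dict))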
transformers/src/transformers/models/superpoint/convert_superpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/superpoint/convert_superpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 2857 }
396
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Swin2SR checkpoints from the original repository. URL: https://github.com/mv-lab/swin2sr""" import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor def get_config(checkpoint_url): config = Swin2SRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: config.upscale = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: config.upscale = 4 config.image_size = 48 config.upsampler = "pixelshuffle_aux" elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: config.depths = [6, 6, 6, 6] config.embed_dim = 60 config.num_heads = [6, 6, 6, 6] config.upsampler = "pixelshuffledirect" elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: config.upscale = 4 config.upsampler = "nearest+conv" elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: config.num_channels = 1 config.upscale = 1 config.image_size = 126 config.window_size = 7 config.img_range = 255.0 config.upsampler = "" return config def rename_key(name, config): if "patch_embed.proj" in name and "layers" not in name: name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection") if "patch_embed.norm" in name: name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm") if "layers" in name: name = name.replace("layers", "encoder.stages") if "residual_group.blocks" in name: name = name.replace("residual_group.blocks", "layers") if "attn.proj" in name: name = name.replace("attn.proj", "attention.output.dense") if "attn" in name: name = name.replace("attn", "attention.self") if "norm1" in name: name = name.replace("norm1", "layernorm_before") if "norm2" in name: name = name.replace("norm2", "layernorm_after") if "mlp.fc1" in name: name = name.replace("mlp.fc1", "intermediate.dense") if "mlp.fc2" in name: name = name.replace("mlp.fc2", "output.dense") if "q_bias" in name: name = name.replace("q_bias", "query.bias") if "k_bias" in name: name = name.replace("k_bias", "key.bias") if "v_bias" in name: name = name.replace("v_bias", "value.bias") if "cpb_mlp" in name: name = name.replace("cpb_mlp", "continuous_position_bias_mlp") if "patch_embed.proj" in name: name = name.replace("patch_embed.proj", "patch_embed.projection") if name == "norm.weight": name = "layernorm.weight" if name == "norm.bias": name = "layernorm.bias" if "conv_first" in name: name = name.replace("conv_first", "first_convolution") if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: name = name.replace("conv_last", "final_convolution") if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: name = name.replace("conv_before_upsample.0", "conv_before_upsample") if 
"upsample.0" in name: name = name.replace("upsample.0", "upsample.convolution_0") if "upsample.2" in name: name = name.replace("upsample.2", "upsample.convolution_1") name = "upsample." + name elif config.upsampler == "pixelshuffledirect": name = name.replace("upsample.0.weight", "upsample.conv.weight") name = name.replace("upsample.0.bias", "upsample.conv.bias") else: pass else: name = "swin2sr." + name return name def convert_state_dict(orig_state_dict, config): for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) if "qkv" in key: key_split = key.split(".") stage_num = int(key_split[1]) block_num = int(key_split[4]) dim = config.embed_dim if "weight" in key: orig_state_dict[ f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight" ] = val[:dim, :] orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"] = ( val[dim : dim * 2, :] ) orig_state_dict[ f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight" ] = val[-dim:, :] else: orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"] = ( val[:dim] ) orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"] = ( val[dim : dim * 2] ) orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"] = ( val[-dim:] ) pass else: orig_state_dict[rename_key(key, config)] = val return orig_state_dict def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub): config = get_config(checkpoint_url) model = Swin2SRForImageSuperResolution(config) model.eval() state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") new_state_dict = convert_state_dict(state_dict, config) missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False) if len(missing_keys) > 0: raise ValueError("Missing keys when converting: {}".format(missing_keys)) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(f"Unexpected key {key} in state_dict") # verify values url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true" image = Image.open(requests.get(url, stream=True).raw).convert("RGB") processor = Swin2SRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values image_size = 126 if "Jpeg" in checkpoint_url else 256 transforms = Compose( [ Resize((image_size, image_size)), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ] ) pixel_values = transforms(image).unsqueeze(0) if config.num_channels == 1: pixel_values = pixel_values[:, 0, :, :].unsqueeze(1) outputs = model(pixel_values) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: expected_shape = torch.Size([1, 3, 512, 512]) expected_slice = torch.tensor( [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: expected_shape = torch.Size([1, 3, 1024, 1024]) expected_slice = torch.tensor( [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here expected_shape = torch.Size([1, 3, 1024, 1024]) expected_slice = torch.tensor( [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] ) elif 
"Swin2SR_Lightweight_X2_64" in checkpoint_url: expected_shape = torch.Size([1, 3, 512, 512]) expected_slice = torch.tensor( [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: expected_shape = torch.Size([1, 3, 1024, 1024]) expected_slice = torch.tensor( [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] ) assert ( outputs.reconstruction.shape == expected_shape ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}" assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3) print("Looks ok!") url_to_name = { "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": ( "swin2SR-classical-sr-x2-64" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": ( "swin2SR-classical-sr-x4-64" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": ( "swin2SR-compressed-sr-x4-48" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": ( "swin2SR-lightweight-x2-64" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": ( "swin2SR-realworld-sr-x4-64-bsrgan-psnr" ), } model_name = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: model.push_to_hub(f"caidas/{model_name}") processor.push_to_hub(f"caidas/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth", type=str, help="URL of the original Swin2SR checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.") args = parser.parse_args() convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
transformers/src/transformers/models/swin2sr/convert_swin2sr_original_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/swin2sr/convert_swin2sr_original_to_pytorch.py", "repo_id": "transformers", "token_count": 5322 }
397
# coding=utf-8 # Copyright 2022 Google LLC and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Convert T5X checkpoint to PyTorch Steps: - Install gsutil according to https://cloud.google.com/storage/docs/gsutil_install - Get a T5X checkpoint at https://github.com/google-research/t5x/blob/main/docs/models.md#t5-11-checkpoints Example: `gsutil -m cp -r gs://t5-data/pretrained_models/t5x/t5_1_1_small $HOME/` - Create or download a corresponding config for the downloaded model. E.g. for T5 v1.1 small, you can use https://huggingface.co/google/t5-v1_1-small/blob/main/config.json - Convert: ``` python3 convert_t5x_checkpoint_to_pytorch.py --t5x_checkpoint_path=$HOME/t5_1_1_small --config_file=config.json\ --pytorch_dump_path=$HOME/t5_1_1_small_pt ``` """ import argparse import collections import torch from flax import traverse_util from t5x import checkpoints from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def t5x_attention_lookup(params, i, prefix, layer_name="attention"): """Returns the KOQV parameters of (self-)attention. Does not transpose.""" k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"] o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"] q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"] v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"] return k, o, q, v def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False): """Returns the MLP parameters of a layer. Does not transpose.""" if split_mlp_wi: wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"] wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"] wi = (wi_0, wi_1) else: wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"] wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"] return wi, wo def t5x_layer_norm_lookup(params, i, prefix, layer_name): """Returns the layer norm param of a layer.""" return params[f"{prefix}/layers_{i}/{layer_name}/scale"] def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, num_decoder_layers: int, is_encoder_only: bool): """Converts the parameters from T5X-Flax to Transformers-PyTorch.""" old = traverse_util.flatten_dict(variables["target"]) old = {"/".join(k): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old print("Split MLP:", split_mlp_wi) new = collections.OrderedDict() # Shared embeddings. new["shared.weight"] = old["token_embedder/embedding"] # Encoder. for i in range(num_layers): # Block i, layer 0 (Self Attention). 
layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm") k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention") new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T # Block i, layer 1 (MLP). layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm") wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi) new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm if split_mlp_wi: new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T else: new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[ "encoder/relpos_bias/rel_embedding" ].T new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"] if not is_encoder_only: # Decoder. for i in range(num_decoder_layers): # Block i, layer 0 (Self Attention). layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm") k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention") new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T # Block i, layer 1 (Cross Attention). layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm") k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention") new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T # Block i, layer 2 (MLP). layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm") wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi) new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm if split_mlp_wi: new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T else: new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"] new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[ "decoder/relpos_bias/rel_embedding" ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T return new def make_state_dict(converted_params, is_encoder_only: bool): """Prepares a state dict for the PyTorch model.""" # Make a state dict with torch tensors. state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()]) # Add what is missing. 
if "encoder.embed_tokens.weight" not in state_dict: state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("Using shared word embeddings as lm_head.") state_dict["lm_head.weight"] = state_dict["shared.weight"] return state_dict def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only): """Replaces the params in model witht the T5X converted params.""" variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path) converted = convert_t5x_to_pytorch( variables, num_layers=config.num_layers, num_decoder_layers=config.num_decoder_layers, is_encoder_only=is_encoder_only, ) state_dict = make_state_dict(converted, is_encoder_only) model.load_state_dict(state_dict, strict=True) def convert_t5x_checkpoint_to_pytorch( t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False ): """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint.""" # Initialise PyTorch model config = T5Config.from_json_file(config_file) print(f"Building PyTorch model from configuration: {config}") # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: model = T5EncoderModel(config) else: model = T5ForConditionalGeneration(config) # Load weights from tf checkpoint load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}") model.save_pretrained(pytorch_dump_path) # Verify that we can load the checkpoint. model.from_pretrained(pytorch_dump_path) print("Done") if __name__ == "__main__": parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.") # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False ) args = parser.parse_args() convert_t5x_checkpoint_to_pytorch( args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
transformers/src/transformers/models/t5/convert_t5x_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/t5/convert_t5x_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 4540 }
398
# coding=utf-8 # Copyright 2021 Google Research and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 TAPAS model.""" from __future__ import annotations import enum import math from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutputWithPastAndCrossAttentions, TFBaseModelOutputWithPooling, TFMaskedLMOutput, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, is_tensorflow_probability_available, logging, replace_return_docstrings, ) from .configuration_tapas import TapasConfig logger = logging.get_logger(__name__) # soft dependency if is_tensorflow_probability_available(): try: import tensorflow_probability as tfp # On the first call, check whether a compatible version of TensorFlow is installed # TensorFlow Probability depends on a recent stable release of TensorFlow n = tfp.distributions.Normal(loc=0.0, scale=1.0) except ImportError: logger.error( "TAPAS models are not usable since `tensorflow_probability` can't be loaded. " "It seems you have `tensorflow_probability` installed with the wrong tensorflow version. " "Please try to reinstall it following the instructions here: https://github.com/tensorflow/probability." ) else: try: import tensorflow_probability as tfp # On the first call, check whether a compatible version of TensorFlow is installed # TensorFlow Probability depends on a recent stable release of TensorFlow _ = tfp.distributions.Normal(loc=0.0, scale=1.0) except ImportError: pass _CONFIG_FOR_DOC = "TapasConfig" _CHECKPOINT_FOR_DOC = "google/tapas-base" EPSILON_ZERO_DIVISION = 1e-10 CLOSE_ENOUGH_TO_LOG_ZERO = -10000.0 @dataclass class TFTableQuestionAnsweringOutput(ModelOutput): """ Output type of [`TFTapasForQuestionAnswering`]. Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` (and possibly `answer`, `aggregation_labels`, `numeric_values` and `numeric_values_scale` are provided)): Total loss as the sum of the hierarchical cell selection log-likelihood loss and (optionally) the semi-supervised regression loss and (optionally) supervised loss for aggregations. logits (`tf.Tensor` of shape `(batch_size, sequence_length)`): Prediction scores of the cell selection head, for every token. logits_aggregation (`tf.Tensor`, *optional*, of shape `(batch_size, num_aggregation_labels)`): Prediction scores of the aggregation head, for every aggregation operator. 
hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor = None logits_aggregation: tf.Tensor | None = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None class TFTapasEmbeddings(keras.layers.Layer): """ Construct the embeddings from word, position and token_type embeddings. Same as BertEmbeddings but with a number of additional token type embeddings to encode tabular structure. """ def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.config = config self.number_of_token_type_embeddings = len(config.type_vocab_sizes) self.reset_position_index_per_cell = config.reset_position_index_per_cell self.hidden_size = config.hidden_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) def build(self, input_shape=None): with tf.name_scope("word_embeddings"): self.weight = self.add_weight( name="weight", shape=[self.config.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("position_embeddings"): self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.hidden_size], initializer=get_initializer(self.initializer_range), ) for i, type_vocab_size in enumerate(self.config.type_vocab_sizes): with tf.name_scope(f"token_type_embeddings_{i}"): setattr( self, f"token_type_embeddings_{i}", self.add_weight( name="embeddings", shape=[type_vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range), ), ) if self.built: return self.built = True if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) def call( self, input_ids: tf.Tensor = None, position_ids: tf.Tensor = None, token_type_ids: tf.Tensor = None, inputs_embeds: tf.Tensor = None, training: bool = False, ) -> tf.Tensor: """ Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor. 
""" assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: input_shape = shape_list(input_ids) else: input_shape = shape_list(inputs_embeds)[:-1] seq_length = input_shape[1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape + [self.number_of_token_type_embeddings], value=0) if position_ids is None: # create absolute position embeddings position_ids = tf.expand_dims(tf.range(start=0, limit=seq_length), axis=0) position_ids = tf.broadcast_to(position_ids, shape=input_shape) # when self.config.reset_position_index_per_cell is set to True, create relative position embeddings if self.reset_position_index_per_cell: # shape (batch_size, seq_len) col_index = IndexMap(token_type_ids[:, :, 1], self.config.type_vocab_sizes[1], batch_dims=1) # shape (batch_size, seq_len) row_index = IndexMap(token_type_ids[:, :, 2], self.config.type_vocab_sizes[2], batch_dims=1) # shape (batch_size, seq_len) full_index = ProductIndexMap(col_index, row_index) # shape (max_rows * max_columns,). First absolute position for every cell first_position_per_segment = reduce_min(position_ids, full_index)[0] # ? shape (batch_size, seq_len). First absolute position of the cell for every token first_position = gather(first_position_per_segment, full_index) # shape (1, seq_len) position = tf.expand_dims(tf.range(start=0, limit=seq_length), axis=0) position_ids = tf.math.minimum(self.max_position_embeddings - 1, position - first_position) if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) position_embeddings = tf.gather(self.position_embeddings, indices=position_ids) final_embeddings = inputs_embeds + position_embeddings for i in range(self.number_of_token_type_embeddings): name = f"token_type_embeddings_{i}" final_embeddings += tf.gather(params=getattr(self, name), indices=token_type_ids[:, :, i]) final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->Tapas class TFTapasSelfAttention(keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number " f"of attention heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.query = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" ) self.value = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder self.config = config def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] tensor = 
tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor, encoder_attention_mask: tf.Tensor, past_key_value: Tuple[tf.Tensor], output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.query(inputs=hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) key_layer = tf.concat([past_key_value[0], key_layer], axis=2) value_layer = tf.concat([past_key_value[1], value_layer], axis=2) else: key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. # (batch size, num_heads, seq_len_q, seq_len_k) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFTapasModel call() function) attention_scores = tf.add(attention_scores, attention_mask) # Normalize the attention scores to probabilities. attention_probs = stable_softmax(logits=attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
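        # With rate `attention_probs_dropout_prob`, Keras rescales the surviving weights by
        # 1 / (1 - rate) at training time; e.g. with rate 0.1, an attention row [0.5, 0.3, 0.2]
        # whose middle entry is dropped becomes approximately [0.56, 0.0, 0.22].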
attention_probs = self.dropout(inputs=attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = tf.multiply(attention_probs, head_mask) attention_output = tf.matmul(attention_probs, value_layer) attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, all_head_size) attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size)) outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->Tapas class TFTapasSelfOutput(keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->Tapas class TFTapasAttention(keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.self_attention = TFTapasSelfAttention(config, name="self") self.dense_output = TFTapasSelfOutput(config, name="output") def prune_heads(self, heads): raise NotImplementedError def call( self, input_tensor: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor, encoder_attention_mask: tf.Tensor, past_key_value: Tuple[tf.Tensor], output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: self_outputs = self.self_attention( hidden_states=input_tensor, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, training=training, ) attention_output = self.dense_output( hidden_states=self_outputs[0], input_tensor=input_tensor, training=training ) # add attentions (possibly with past_key_value) if we output them outputs = (attention_output,) + self_outputs[1:] return outputs def build(self, input_shape=None): if 
self.built: return self.built = True if getattr(self, "self_attention", None) is not None: with tf.name_scope(self.self_attention.name): self.self_attention.build(None) if getattr(self, "dense_output", None) is not None: with tf.name_scope(self.dense_output.name): self.dense_output.build(None) # Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->Tapas class TFTapasIntermediate(keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->Tapas class TFTapasOutput(keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->Tapas class TFTapasLayer(keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.attention = TFTapasAttention(config, name="attention") self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = TFTapasAttention(config, name="crossattention") self.intermediate = TFTapasIntermediate(config, name="intermediate") self.bert_output = TFTapasOutput(config, name="output") def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor | None, encoder_attention_mask: tf.Tensor | None, past_key_value: Tuple[tf.Tensor] | None, output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if 
past_key_value is not None else None self_attention_outputs = self.attention( input_tensor=hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=self_attn_past_key_value, output_attentions=output_attentions, training=training, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( input_tensor=attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, training=training, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value intermediate_output = self.intermediate(hidden_states=attention_output) layer_output = self.bert_output( hidden_states=intermediate_output, input_tensor=attention_output, training=training ) outputs = (layer_output,) + outputs # add attentions if we output them # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "bert_output", None) is not None: with tf.name_scope(self.bert_output.name): self.bert_output.build(None) if getattr(self, "crossattention", None) is not None: with tf.name_scope(self.crossattention.name): self.crossattention.build(None) # Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->Tapas class TFTapasEncoder(keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.config = config self.layer = [TFTapasLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor | None, encoder_attention_mask: tf.Tensor | None, past_key_values: Tuple[Tuple[tf.Tensor]] | None, use_cache: Optional[bool], output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool = False, ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: all_hidden_states = () if output_hidden_states else None all_attentions = () if 
output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) past_key_value = past_key_values[i] if past_key_values is not None else None layer_outputs = layer_module( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, training=training, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if self.config.add_cross_attention and encoder_hidden_states is not None: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) # Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->Tapas class TFTapasPooler(keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
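        # For hidden_states of shape (batch_size, seq_length, hidden_size), the slice below keeps
        # only the first ([CLS]) position, giving a (batch_size, hidden_size) tensor that is then
        # passed through the tanh-activated dense layer, exactly as in the BERT pooler.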
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(inputs=first_token_tensor) return pooled_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->Tapas class TFTapasPredictionHeadTransform(keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) if isinstance(config.hidden_act, str): self.transform_act_fn = get_tf_activation(config.hidden_act) else: self.transform_act_fn = config.hidden_act self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(inputs=hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->Tapas class TFTapasLMPredictionHead(keras.layers.Layer): def __init__(self, config: TapasConfig, input_embeddings: keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.config = config self.hidden_size = config.hidden_size self.transform = TFTapasPredictionHeadTransform(config, name="transform") # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
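        # Keeping a reference to the embedding layer implements weight tying: `call` below multiplies
        # the hidden states by the transpose of `input_embeddings.weight`, so the (vocab_size,
        # hidden_size) embedding matrix doubles as the output projection and only the per-token
        # `bias` (created in `build`) adds new parameters.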
self.input_embeddings = input_embeddings def build(self, input_shape=None): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") if self.built: return self.built = True if getattr(self, "transform", None) is not None: with tf.name_scope(self.transform.name): self.transform.build(None) def get_output_embeddings(self) -> keras.layers.Layer: return self.input_embeddings def set_output_embeddings(self, value: tf.Variable): self.input_embeddings.weight = value self.input_embeddings.vocab_size = shape_list(value)[0] def get_bias(self) -> Dict[str, tf.Variable]: return {"bias": self.bias} def set_bias(self, value: tf.Variable): self.bias = value["bias"] self.config.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.transform(hidden_states=hidden_states) seq_length = shape_list(hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size]) hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->Tapas class TFTapasMLMHead(keras.layers.Layer): def __init__(self, config: TapasConfig, input_embeddings: keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.predictions = TFTapasLMPredictionHead(config, input_embeddings, name="predictions") def call(self, sequence_output: tf.Tensor) -> tf.Tensor: prediction_scores = self.predictions(hidden_states=sequence_output) return prediction_scores def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "predictions", None) is not None: with tf.name_scope(self.predictions.name): self.predictions.build(None) @keras_serializable class TFTapasMainLayer(keras.layers.Layer): config_class = TapasConfig def __init__(self, config: TapasConfig, add_pooling_layer: bool = True, **kwargs): super().__init__(**kwargs) self.config = config self.embeddings = TFTapasEmbeddings(config, name="embeddings") self.encoder = TFTapasEncoder(config, name="encoder") self.pooler = TFTapasPooler(config, name="pooler") if add_pooling_layer else None def get_input_embeddings(self) -> keras.layers.Layer: return self.embeddings def set_input_embeddings(self, value: tf.Variable): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError @unpack_inputs def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = tf.fill(dims=input_shape, value=1) if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape + [len(self.config.type_vocab_sizes)], value=0) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, training=training, ) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1])) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
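        # Worked example: an attention_mask row [1, 1, 0] becomes [0.0, 0.0, -10000.0] after the
        # (1.0 - mask) * -10000.0 transform below; once added to the raw attention scores, the
        # padded position receives a post-softmax weight that is effectively zero.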
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers encoder_outputs = self.encoder( hidden_states=embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None if not return_dict: return ( sequence_output, pooled_output, ) + encoder_outputs[1:] return TFBaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) class TFTapasPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = TapasConfig base_model_prefix = "tapas" @property def input_signature(self): return { "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), "attention_mask": tf.TensorSpec((None, None), tf.float32, name="attention_mask"), "token_type_ids": tf.TensorSpec((None, None, 7), tf.int32, name="token_type_ids"), } TAPAS_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! 
    If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when
    creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to
    gather all the input Tensors in the first positional argument:

    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
    `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated to the input names given in the docstring:
    `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`

    Note that when creating models and layers with
    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) you don't need to worry
    about any of this, as you can just pass inputs like you would to any other Python function!

    </Tip>

    Parameters:
        config ([`TapasConfig`]): Model configuration class with all the parameters of the model. Initializing with a
            config file does not load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

TAPAS_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`,
            and each example must have the shape `({0})`):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0}, 7)`, *optional*):
            Token indices that encode tabular structure. Indices can be obtained using [`AutoTokenizer`]. See this
            class for more info.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence token in the position embeddings. If
            `reset_position_index_per_cell` of [`TapasConfig`] is set to `True`, relative position embeddings will be
            used. Selected in the range `[0, config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
            This argument can be used only in eager mode, in graph mode the value in the config will be used instead.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
            used instead.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
            eager mode, in graph mode the value will always be set to True.
        training (`bool`, *optional*, defaults to `False`):
            Whether or not to use the model in training mode (some modules like dropout modules have different
            behaviors between training and evaluation).
"""


@add_start_docstrings(
    "The bare Tapas Model transformer outputting raw hidden-states without any specific head on top.",
    TAPAS_START_DOCSTRING,
)
class TFTapasModel(TFTapasPreTrainedModel):
    def __init__(self, config: TapasConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.tapas = TFTapasMainLayer(config, name="tapas")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, TFTapasModel
        >>> import pandas as pd

        >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base")
        >>> model = TFTapasModel.from_pretrained("google/tapas-base")

        >>> data = {
        ...     "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
        ...     "Age": ["56", "45", "59"],
        ...     "Number of movies": ["87", "53", "69"],
        ... }
        >>> table = pd.DataFrame.from_dict(data)
        >>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"]

        >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="tf")
        >>> outputs = model(**inputs)

        >>> last_hidden_states = outputs.last_hidden_state
        ```"""
        outputs = self.tapas(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "tapas", None) is not None:
            with tf.name_scope(self.tapas.name):
                self.tapas.build(None)


@add_start_docstrings("""Tapas Model with a `language modeling` head on top.""", TAPAS_START_DOCSTRING)
class TFTapasForMaskedLM(TFTapasPreTrainedModel, TFMaskedLanguageModelingLoss):
    def __init__(self, config: TapasConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        if config.is_decoder:
            logger.warning(
                "If you want to use `TFTapasForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )

        self.tapas = TFTapasMainLayer(config, add_pooling_layer=False, name="tapas")
        self.lm_head = TFTapasMLMHead(config, input_embeddings=self.tapas.embeddings, name="cls")

    def get_lm_head(self) -> keras.layers.Layer:
        return self.lm_head.predictions

    @unpack_inputs
    @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, TFTapasForMaskedLM
        >>> import pandas as pd

        >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base")
        >>> model = TFTapasForMaskedLM.from_pretrained("google/tapas-base")

        >>> data = {
        ...     "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
        ...     "Age": ["56", "45", "59"],
        ...     "Number of movies": ["87", "53", "69"],
        ... }
        >>> table = pd.DataFrame.from_dict(data)

        >>> inputs = tokenizer(
        ...     table=table, queries="How many [MASK] has George [MASK] played in?", return_tensors="tf"
        ... )
        >>> labels = tokenizer(
        ...     table=table, queries="How many movies has George Clooney played in?", return_tensors="tf"
        ... 
)["input_ids"] >>> outputs = model(**inputs, labels=labels) >>> logits = outputs.logits ```""" outputs = self.tapas( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "tapas", None) is not None: with tf.name_scope(self.tapas.name): self.tapas.build(None) if getattr(self, "lm_head", None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build(None) class TFTapasComputeTokenLogits(keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) self.temperature = config.temperature # cell selection heads with tf.name_scope("output"): self.output_weights = self.add_weight( name="output_weights", shape=(config.hidden_size,), dtype=tf.float32, trainable=True, initializer=tf.zeros_initializer() if config.init_cell_selection_weights_to_zero else keras.initializers.TruncatedNormal(stddev=config.initializer_range), ) self.output_bias = self.add_weight( name="output_bias", shape=(), trainable=True, initializer=tf.zeros_initializer() ) def call(self, sequence_output: tf.Tensor) -> tf.Tensor: """ Computes logits per token Args: sequence_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model. Returns: logits (`tf.Tensor` of shape `(batch_size, sequence_length)`): Logits per token. """ logits = (tf.einsum("bsj,j->bs", sequence_output, self.output_weights) + self.output_bias) / self.temperature return logits class TFTapasComputeColumnLogits(keras.layers.Layer): def __init__(self, config: TapasConfig, **kwargs): super().__init__(**kwargs) with tf.name_scope("column_output"): self.column_output_weights = self.add_weight( name="column_output_weights", shape=[config.hidden_size], dtype=tf.float32, trainable=True, initializer=tf.zeros_initializer() if config.init_cell_selection_weights_to_zero else keras.initializers.TruncatedNormal(stddev=config.initializer_range), ) self.column_output_bias = self.add_weight( name="column_output_bias", shape=(), trainable=True, initializer=tf.zeros_initializer() ) def call(self, sequence_output, cell_index, cell_mask, allow_empty_column_selection) -> tf.Tensor: """ Computes the column logits. Args: sequence_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model. cell_index (`ProductIndexMap`): Index that groups tokens into cells. cell_mask (`tf.Tensor` of shape `(batch_size, max_num_rows * max_num_cols)`): Mask for cells that exist in the table (i.e. that are not padding). 
allow_empty_column_selection (`bool`): Whether to allow not to select any column Returns: column_logits (`tf.Tensor`of shape `(batch_size, max_num_cols)`): Tensor containing the column logits for every example in the batch. """ # First, compute the token logits (batch_size, seq_len) - without temperature token_logits = tf.einsum("bsj,j->bs", sequence_output, self.column_output_weights) + self.column_output_bias # Next, average the logits per cell (batch_size, max_num_cols*max_num_rows) cell_logits, cell_logits_index = reduce_mean(token_logits, cell_index) # Finally, average the logits per column (batch_size, max_num_cols) column_index = cell_index.project_inner(cell_logits_index) column_logits, out_index = reduce_sum(cell_logits * cell_mask, column_index) cell_count, _ = reduce_sum(cell_mask, column_index) column_logits /= cell_count + EPSILON_ZERO_DIVISION # Mask columns that do not appear in the example. is_padding = tf.logical_and(cell_count < 0.5, tf.not_equal(out_index.indices, 0)) column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * tf.cast(is_padding, tf.float32) if not allow_empty_column_selection: column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * tf.cast(tf.equal(out_index.indices, 0), tf.float32) return column_logits @add_start_docstrings( """ Tapas Model with a cell selection head and optional aggregation head on top for question-answering tasks on tables (linear layers on top of the hidden-states output to compute `logits` and optional `logits_aggregation`), e.g. for SQA, WTQ or WikiSQL-supervised tasks. """, TAPAS_START_DOCSTRING, ) class TFTapasForQuestionAnswering(TFTapasPreTrainedModel): def __init__(self, config: TapasConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) # base model self.tapas = TFTapasMainLayer(config, name="tapas") # dropout self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.compute_token_logits = TFTapasComputeTokenLogits(config, name="compute_token_logits") self.compute_column_logits = TFTapasComputeColumnLogits(config, name="compute_column_logits") if config.num_aggregation_labels > 0: self.aggregation_classifier = keras.layers.Dense( config.num_aggregation_labels, kernel_initializer=get_initializer(config.initializer_range), name="aggregation_classifier", ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFTableQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, table_mask: np.ndarray | tf.Tensor | None = None, aggregation_labels: np.ndarray | tf.Tensor | None = None, float_answer: np.ndarray | tf.Tensor | None = None, numeric_values: np.ndarray | tf.Tensor | None = None, numeric_values_scale: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFTableQuestionAnsweringOutput, Tuple[tf.Tensor]]: r""" table_mask (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*): Mask for the table. Indicates which tokens belong to the table (1). Question tokens, table headers and padding are 0. 
        labels (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*):
            Labels per token for computing the hierarchical cell selection loss. This encodes the positions of the
            answer appearing in the table. Can be obtained using [`AutoTokenizer`].

            - 1 for tokens that are **part of the answer**,
            - 0 for tokens that are **not part of the answer**.

        aggregation_labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Aggregation function index for every example in the batch for computing the aggregation loss. Indices
            should be in `[0, ..., config.num_aggregation_labels - 1]`. Only required in case of strong supervision
            for aggregation (WikiSQL-supervised).
        float_answer (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Float answer for every example in the batch. Set to *float('nan')* for cell selection questions. Only
            required in case of weak supervision (WTQ) to calculate the aggregate mask and regression loss.
        numeric_values (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*):
            Numeric values of every token, NaN for tokens which are not numeric values. Can be obtained using
            [`AutoTokenizer`]. Only required in case of weak supervision for aggregation (WTQ) to calculate the
            regression loss.
        numeric_values_scale (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*):
            Scale of the numeric values of every token. Can be obtained using [`AutoTokenizer`]. Only required in
            case of weak supervision for aggregation (WTQ) to calculate the regression loss.

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, TFTapasForQuestionAnswering
        >>> import pandas as pd

        >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base-finetuned-wtq")
        >>> model = TFTapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq")

        >>> data = {
        ...     "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
        ...     "Age": ["56", "45", "59"],
        ...     "Number of movies": ["87", "53", "69"],
        ... }
        >>> table = pd.DataFrame.from_dict(data)
        >>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"]

        >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="tf")
        >>> outputs = model(**inputs)

        >>> logits = outputs.logits
        >>> logits_aggregation = outputs.logits_aggregation
        ```"""
        outputs = self.tapas(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        sequence_output = outputs[0]
        pooled_output = outputs[1]

        sequence_output = self.dropout(sequence_output)

        if input_ids is not None:
            input_shape = shape_list(input_ids)
        else:
            input_shape = shape_list(inputs_embeds)[:-1]

        # Construct indices for the table.
        if token_type_ids is None:
            token_type_ids = tf.fill(input_shape + [len(self.config.type_vocab_sizes)], 0)

        token_types = [
            "segment_ids",
            "column_ids",
            "row_ids",
            "prev_labels",
            "column_ranks",
            "inv_column_ranks",
            "numeric_relations",
        ]

        row_ids = token_type_ids[:, :, token_types.index("row_ids")]
        column_ids = token_type_ids[:, :, token_types.index("column_ids")]

        # Construct the row and column index maps (one segment per table row / column).
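        # Rough illustration (see the tokenizer for the exact id assignment): row_ids/column_ids are
        # 0 for question and padding tokens and count table rows/columns from 1, so a token in the
        # second row, third column has row_id 2 and column_id 3. The ProductIndexMap built below then
        # maps every token to a single cell id (row * max_num_columns + column), one segment per cell.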
row_index = IndexMap( indices=tf.minimum(tf.cast(row_ids, tf.int32), self.config.max_num_rows - 1), num_segments=self.config.max_num_rows, batch_dims=1, ) col_index = IndexMap( indices=tf.minimum(tf.cast(column_ids, tf.int32), self.config.max_num_columns - 1), num_segments=self.config.max_num_columns, batch_dims=1, ) cell_index = ProductIndexMap(row_index, col_index) # Masks. input_shape = shape_list(input_ids) if input_ids is not None else shape_list(inputs_embeds)[:-1] if attention_mask is None: attention_mask = tf.ones(input_shape) # Table cells only, without question tokens and table headers. if table_mask is None: table_mask = tf.where(row_ids > 0, tf.ones_like(row_ids), tf.zeros_like(row_ids)) # <float32>[batch_size, seq_length] input_mask_float = tf.cast(attention_mask, tf.float32) table_mask_float = tf.cast(table_mask, tf.float32) # Mask for cells that exist in the table (i.e. that are not padding). cell_mask, _ = reduce_mean(input_mask_float, cell_index) # Compute logits per token. These are used to select individual cells. logits = self.compute_token_logits(sequence_output) # Compute logits per column. These are used to select a column. column_logits = None if self.config.select_one_column: column_logits = self.compute_column_logits( sequence_output, cell_index, cell_mask, self.config.allow_empty_column_selection ) # Aggregate logits. logits_aggregation = None if self.config.num_aggregation_labels > 0: logits_aggregation = self.aggregation_classifier(pooled_output) # Total loss calculation total_loss = tf.zeros(shape=(1,), dtype=tf.float32) calculate_loss = False if labels is not None: calculate_loss = True is_supervised = not self.config.num_aggregation_labels > 0 or not self.config.use_answer_as_supervision # Semi-supervised cell selection in case of no aggregation: # If the answer (the denotation) appears directly in the table we might # select the answer without applying any aggregation function. There are # some ambiguous cases, see utils._calculate_aggregate_mask for more info. # `aggregate_mask` is 1 for examples where we chose to aggregate and 0 # for examples where we chose to select the answer directly. # `labels` encodes the positions of the answer appearing in the table. if is_supervised: aggregate_mask = None else: if float_answer is not None: assert ( shape_list(labels)[0] == shape_list(float_answer)[0] ), "Make sure the answers are a FloatTensor of shape (batch_size,)" # <float32>[batch_size] aggregate_mask = _calculate_aggregate_mask( float_answer, pooled_output, self.config.cell_selection_preference, labels, self.aggregation_classifier, ) else: aggregate_mask = None raise ValueError("You have to specify float answers in order to calculate the aggregate mask") # Cell selection log-likelihood if self.config.average_logits_per_cell: logits_per_cell, _ = reduce_mean(logits, cell_index) logits = gather(logits_per_cell, cell_index) dist_per_token = tfp.distributions.Bernoulli(logits=logits) # Compute cell selection loss per example. 
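            # The Bernoulli distribution defined above treats every (cell-averaged) token logit as an
            # independent select/don't-select decision; the branches below score it against the 0/1
            # `labels`, either over all tokens (with positive tokens up-weighted by
            # `config.positive_label_weight`) or restricted to one column when `select_one_column` is set.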
selection_loss_per_example = None if not self.config.select_one_column: weight = tf.where( labels == 0, tf.ones_like(labels, dtype=tf.float32), self.config.positive_label_weight * tf.ones_like(labels, dtype=tf.float32), ) selection_loss_per_token = -dist_per_token.log_prob(labels) * weight selection_loss_per_example = tf.reduce_sum(selection_loss_per_token * input_mask_float, axis=1) / ( tf.reduce_sum(input_mask_float, axis=1) + EPSILON_ZERO_DIVISION ) else: selection_loss_per_example, logits = _single_column_cell_selection_loss( logits, column_logits, labels, cell_index, col_index, cell_mask ) dist_per_token = tfp.distributions.Bernoulli(logits=logits) # Supervised cell selection if self.config.disable_per_token_loss: pass elif is_supervised: total_loss += tf.reduce_mean(selection_loss_per_example) else: # For the not supervised case, do not assign loss for cell selection total_loss += tf.reduce_mean(selection_loss_per_example * (1.0 - aggregate_mask)) # Semi-supervised regression loss and supervised loss for aggregations if self.config.num_aggregation_labels > 0: if is_supervised: # Note that `aggregate_mask` is None if the setting is supervised. if aggregation_labels is not None: assert ( shape_list(labels)[0] == shape_list(aggregation_labels)[0] ), "Make sure the aggregation labels are a LongTensor of shape (batch_size,)" per_example_additional_loss = _calculate_aggregation_loss( logits_aggregation, aggregate_mask, aggregation_labels, self.config.use_answer_as_supervision, self.config.num_aggregation_labels, self.config.aggregation_loss_weight, ) else: raise ValueError( "You have to specify aggregation labels in order to calculate the aggregation loss" ) else: aggregation_labels = tf.zeros(shape_list(labels)[0], dtype=tf.int32) per_example_additional_loss = _calculate_aggregation_loss( logits_aggregation, aggregate_mask, aggregation_labels, self.config.use_answer_as_supervision, self.config.num_aggregation_labels, self.config.aggregation_loss_weight, ) if self.config.use_answer_as_supervision: if numeric_values is not None and numeric_values_scale is not None: assert shape_list(numeric_values) == shape_list(numeric_values_scale) # Add regression loss for numeric answers which require aggregation. answer_loss, large_answer_loss_mask = _calculate_regression_loss( float_answer, aggregate_mask, dist_per_token, numeric_values, numeric_values_scale, table_mask_float, logits_aggregation, self.config, ) per_example_additional_loss += answer_loss # Zero loss for examples with answer_loss > cutoff. 
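                        # `large_answer_loss_mask` is expected to be 1 for examples whose answer loss
                        # stayed below `config.answer_loss_cutoff` and 0 otherwise, so the
                        # multiplication below drops outlier examples from the regression term rather
                        # than letting them dominate the batch loss.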
                        per_example_additional_loss *= large_answer_loss_mask
                    else:
                        raise ValueError(
                            "You have to specify numeric values and numeric values scale in order to calculate the"
                            " regression loss"
                        )
                total_loss += tf.reduce_mean(per_example_additional_loss)

        else:
            # if no label ids are provided, set them to zeros in order to properly compute logits
            labels = tf.zeros_like(logits)
            _, logits = _single_column_cell_selection_loss(
                logits, column_logits, labels, cell_index, col_index, cell_mask
            )
        if not return_dict:
            output = (logits, logits_aggregation) + outputs[2:]
            return ((total_loss,) + output) if calculate_loss else output

        return TFTableQuestionAnsweringOutput(
            loss=total_loss if calculate_loss else None,
            logits=logits,
            logits_aggregation=logits_aggregation,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "tapas", None) is not None:
            with tf.name_scope(self.tapas.name):
                self.tapas.build(None)
        if getattr(self, "compute_token_logits", None) is not None:
            with tf.name_scope(self.compute_token_logits.name):
                self.compute_token_logits.build(None)
        if getattr(self, "compute_column_logits", None) is not None:
            with tf.name_scope(self.compute_column_logits.name):
                self.compute_column_logits.build(None)
        if getattr(self, "aggregation_classifier", None) is not None:
            with tf.name_scope(self.aggregation_classifier.name):
                self.aggregation_classifier.build([None, None, self.config.hidden_size])


@add_start_docstrings(
    """
    Tapas Model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for
    table entailment tasks, such as TabFact (Chen et al., 2020).
    """,
    TAPAS_START_DOCSTRING,
)
class TFTapasForSequenceClassification(TFTapasPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: TapasConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels

        self.tapas = TFTapasMainLayer(config, name="tapas")
        self.dropout = keras.layers.Dropout(config.hidden_dropout_prob, name="dropout")
        self.classifier = keras.layers.Dense(
            config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
        )
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Note: this is called
            "classification_class_index" in the original implementation.

Returns: Examples: ```python >>> from transformers import AutoTokenizer, TFTapasForSequenceClassification >>> import tensorflow as tf >>> import pandas as pd >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base-finetuned-tabfact") >>> model = TFTapasForSequenceClassification.from_pretrained("google/tapas-base-finetuned-tabfact") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... } >>> table = pd.DataFrame.from_dict(data) >>> queries = [ ... "There is only one actor who is 45 years old", ... "There are 3 actors which played in more than 60 movies", ... ] >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="tf") >>> labels = tf.convert_to_tensor([1, 0])  # 1 means entailed, 0 means refuted >>> outputs = model(**inputs, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits ```""" outputs = self.tapas( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] pooled_output = self.dropout(inputs=pooled_output, training=training) logits = self.classifier(inputs=pooled_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "tapas", None) is not None: with tf.name_scope(self.tapas.name): self.tapas.build(None) if getattr(self, "dropout", None) is not None: with tf.name_scope(self.dropout.name): self.dropout.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) """ TAPAS utilities.""" class AverageApproximationFunction(str, enum.Enum): RATIO = "ratio" FIRST_ORDER = "first_order" SECOND_ORDER = "second_order" # Beginning of everything related to segmented tensors class IndexMap: """Index grouping entries within a tensor.""" def __init__(self, indices, num_segments, batch_dims=0): """ Creates an index. Args: indices: <int32> Tensor of indices, same shape as `values`. num_segments: <int32> Scalar tensor, the number of segments. All elements in a batched segmented tensor must have the same number of segments (although many segments can be empty). batch_dims: Python integer, the number of batch dimensions. The first `batch_dims` dimensions of a SegmentedTensor are treated as batch dimensions. Segments in different batch elements are always distinct even if they have the same index. """ self.indices = tf.convert_to_tensor(indices) self.num_segments = tf.convert_to_tensor(num_segments) self.batch_dims = batch_dims def batch_shape(self): return tf.shape(self.indices)[: self.batch_dims] class ProductIndexMap(IndexMap): """The product of two indices.""" def __init__(self, outer_index, inner_index): """ Combines indices i and j into pairs (i, j). The result is an index where each segment (i, j) is the intersection of segments i and j.
For example, if the inputs represent table cells indexed by rows and columns respectively, the output will be a table indexed by (row, column) pairs, i.e. by cell. The implementation combines indices {0, .., n - 1} and {0, .., m - 1} into {0, .., nm - 1}. The output has `num_segments` equal to `outer_index.num_segments` * `inner_index.num_segments`. Args: outer_index: IndexMap. inner_index: IndexMap, must have the same shape as `outer_index`. """ if outer_index.batch_dims != inner_index.batch_dims: raise ValueError("outer_index.batch_dims and inner_index.batch_dims must be the same.") super(ProductIndexMap, self).__init__( indices=( inner_index.indices + outer_index.indices * tf.cast(inner_index.num_segments, inner_index.indices.dtype) ), num_segments=inner_index.num_segments * outer_index.num_segments, batch_dims=inner_index.batch_dims, ) self.outer_index = outer_index self.inner_index = inner_index def project_outer(self, index): """Projects an index with the same index set onto the outer components.""" return IndexMap( indices=tf.math.floordiv(index.indices, self.inner_index.num_segments), num_segments=self.outer_index.num_segments, batch_dims=index.batch_dims, ) def project_inner(self, index): """Projects an index with the same index set onto the inner components.""" return IndexMap( indices=tf.math.floormod(index.indices, self.inner_index.num_segments), num_segments=self.inner_index.num_segments, batch_dims=index.batch_dims, ) def gather(values, index, name="segmented_gather"): """ Gathers from `values` using the index map. For each element in the domain of the index map this operation looks up a value for that index in `values`. Two elements from the same segment always get assigned the same value. Args: values: [B1, ..., Bn, num_segments, V1, ...] Tensor with segment values. index: [B1, ..., Bn, I1, ..., Ik] IndexMap. name: Name for the TensorFlow operation. Returns: [B1, ..., Bn, I1, ..., Ik, V1, ...] Tensor with the gathered values. """ return tf.gather(values, index.indices, batch_dims=index.batch_dims, name=name) def flatten(index, name="segmented_flatten"): """ Flattens a batched index map to a 1d index map. This operation relabels the segments to keep batch elements distinct. The k-th batch element will have indices shifted by `num_segments` * (k - 1). The result is a tensor with `num_segments` multiplied by the number of elements in the batch. Args: index: IndexMap to flatten. name: Name for the TensorFlow operation. Returns: The flattened IndexMap. """ batch_size = tf.reduce_prod(index.batch_shape()) offset = tf.range(batch_size) * index.num_segments offset = tf.reshape(offset, index.batch_shape()) for _ in range(index.batch_dims, index.indices.shape.rank): offset = tf.expand_dims(offset, -1) indices = tf.cast(offset, index.indices.dtype) + index.indices return IndexMap(indices=tf.reshape(indices, [-1]), num_segments=index.num_segments * batch_size, batch_dims=0) def range_index_map(batch_shape, num_segments, name="range_index_map"): """ Constructs an index map equal to range(num_segments). Args: batch_shape (`tf.Tensor`): Batch shape num_segments (`int`): Number of segments name (`str`, *optional*, defaults to 'range_index_map'): Name for the operation. Currently not used Returns: (`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments).
""" batch_shape = tf.convert_to_tensor(batch_shape) batch_shape.shape.assert_has_rank(1) num_segments = tf.convert_to_tensor(num_segments) num_segments.shape.assert_has_rank(0) indices = tf.range(num_segments) shape = tf.concat([tf.ones_like(batch_shape, dtype=tf.int32), tf.expand_dims(num_segments, axis=0)], axis=0) indices = tf.reshape(indices, shape) multiples = tf.concat([batch_shape, [1]], axis=0) indices = tf.tile(indices, multiples) return IndexMap(indices=indices, num_segments=num_segments, batch_dims=batch_shape.shape.as_list()[0]) def _segment_reduce(values, index, segment_reduce_fn, name): """ Applies a segment reduction segment-wise. Args: values (`tf.Tensor`): Tensor with segment values. index (`IndexMap`): IndexMap. segment_reduce_fn (`str`): Name for the reduce operation. One of "sum", "mean", "max" or "min". name (`str`): Name for the operation. Currently not used Returns: (`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments). """ # Flatten the batch dimensions, as segments ops do not support batching. # However if `values` has extra dimensions to the right keep them # unflattened. Segmented ops support vector-valued operations. flat_index = flatten(index) vector_shape = tf.shape(values)[index.indices.shape.rank :] flattened_shape = tf.concat([[-1], vector_shape], axis=0) flat_values = tf.reshape(values, flattened_shape) segment_means = segment_reduce_fn( data=flat_values, segment_ids=flat_index.indices, num_segments=flat_index.num_segments ) # Unflatten the values. new_shape = tf.concat([index.batch_shape(), [index.num_segments], vector_shape], axis=0) output_values = tf.reshape(segment_means, new_shape) output_index = range_index_map(index.batch_shape(), index.num_segments) return output_values, output_index def reduce_mean(values, index, name="segmented_reduce_mean"): """ Averages a tensor over its segments. Outputs 0 for empty segments. This operations computes the mean over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present the output will be a mean of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values: [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..] tensor of values to be averaged. index: IndexMap [B1, B2, ..., Bn, I1, .., Ik] index defining the segments. name: Name for the TensorFlow ops. Returns: A pair (output_values, output_index) where `output_values` is a tensor of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..] and `index` is an IndexMap with shape [B1, B2, ..., Bn, num_segments]. """ return _segment_reduce(values, index, tf.math.unsorted_segment_mean, name) def reduce_sum(values, index, name="segmented_reduce_sum"): """ Sums a tensor over its segments. Outputs 0 for empty segments. This operations computes the sum over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present the output will be a sum of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values: [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..] tensor of values to be averaged. index: IndexMap [B1, B2, ..., Bn, I1, .., Ik] index defining the segments. name: Name for the TensorFlow ops. 
Returns: A pair (output_values, output_index) where `output_values` is a tensor of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..] and `index` is an IndexMap with shape [B1, B2, ..., Bn, num_segments]. """ return _segment_reduce(values, index, tf.math.unsorted_segment_sum, name) def reduce_max(values, index, name="segmented_reduce_max"): """ Computes the maximum over segments. This operation computes the maximum over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present the output will be an element-wise maximum of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values: [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..] tensor of values to take the maximum over. index: IndexMap [B1, B2, ..., Bn, I1, .., Ik] index defining the segments. name: Name for the TensorFlow ops. Returns: A pair (output_values, output_index) where `output_values` is a tensor of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..] and `index` is an IndexMap with shape [B1, B2, ..., Bn, num_segments]. """ return _segment_reduce(values, index, tf.math.unsorted_segment_max, name) def reduce_min(values, index, name="segmented_reduce_min"): """Computes the minimum over segments.""" return _segment_reduce(values, index, tf.math.unsorted_segment_min, name) def _single_column_cell_selection_loss(token_logits, column_logits, labels, cell_index, col_index, cell_mask): """ Computes the loss for cell selection constrained to a single column. The loss is a hierarchical log-likelihood. The model first predicts a column and then selects cells within that column (conditioned on the column). Cells outside the selected column are never selected. Args: token_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`): Tensor containing the logits per token. column_logits (`tf.Tensor` of shape `(batch_size, max_num_cols)`): Tensor containing the logits per column. labels (`tf.Tensor` of shape `(batch_size, sequence_length)`): Labels per token. cell_index (`ProductIndexMap`): Index that groups tokens into cells. col_index (`IndexMap`): Index that groups tokens into columns. cell_mask (`tf.Tensor` of shape `(batch_size, max_num_rows * max_num_cols)`): Mask for cells that exist in the table (i.e. that are not padding). Returns: selection_loss_per_example (`tf.Tensor` of shape `(batch_size,)`): Loss for each example. logits (`tf.Tensor` of shape `(batch_size, sequence_length)`): New logits which are only allowed to select cells in a single column. Logits outside of the most likely column according to *column_logits* will be set to a very low value (such that the probabilities are 0). """ # First find the column we should select. We use the column with maximum # number of selected cells. labels_per_column, _ = reduce_sum(tf.cast(labels, tf.float32), col_index) column_label = tf.argmax(labels_per_column, axis=-1, output_type=tf.int32) # Check if there are no selected cells in the column. In that case the model # should predict the special column id 0, which means "select nothing". no_cell_selected = tf.equal(tf.reduce_max(labels_per_column, axis=-1), 0) column_label = tf.where(no_cell_selected, tf.zeros_like(column_label), column_label) column_dist = tfp.distributions.Categorical(logits=column_logits) column_loss_per_example = -column_dist.log_prob(column_label) # Reduce the labels and logits to per-cell from per-token.
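# A cell's logit is the mean of its token logits (reduce_mean), while a cell counts as selected if any of # its tokens is labeled 1, hence reduce_max on the labels.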
logits_per_cell, _ = reduce_mean(token_logits, cell_index) labels_per_cell, labels_index = reduce_max(tf.cast(labels, tf.int32), cell_index) # Mask for the selected column. column_id_for_cells = cell_index.project_inner(labels_index).indices column_mask = tf.cast(tf.equal(column_id_for_cells, tf.expand_dims(column_label, axis=1)), tf.float32) # Compute the log-likelihood for cells, but only for the selected column. cell_dist = tfp.distributions.Bernoulli(logits=logits_per_cell) cell_log_prob = cell_dist.log_prob(labels_per_cell) cell_loss = -tf.reduce_sum(cell_log_prob * column_mask * cell_mask, axis=1) # We need to normalize the loss by the number of cells in the column. cell_loss /= tf.reduce_sum(column_mask * cell_mask, axis=1) + EPSILON_ZERO_DIVISION selection_loss_per_example = column_loss_per_example selection_loss_per_example += tf.where(no_cell_selected, tf.zeros_like(selection_loss_per_example), cell_loss) # Set the probs outside the selected column (selected by the *model*) # to 0. This ensures backwards compatibility with models that select # cells from multiple columns. selected_column_id = tf.argmax(column_logits, axis=-1, output_type=tf.int32) selected_column_mask = tf.cast( tf.equal(column_id_for_cells, tf.expand_dims(selected_column_id, axis=-1)), tf.float32 ) # Never select cells with the special column id 0. selected_column_mask = tf.where( tf.equal(column_id_for_cells, 0), tf.zeros_like(selected_column_mask), selected_column_mask ) logits_per_cell += CLOSE_ENOUGH_TO_LOG_ZERO * (1.0 - cell_mask * selected_column_mask) logits = gather(logits_per_cell, cell_index) return selection_loss_per_example, logits def _calculate_aggregate_mask(answer, pooled_output, cell_selection_preference, labels, aggregation_classifier): """ Finds examples where the model should select cells with no aggregation. Returns a mask that determines for which examples the model should select answers directly from the table, without any aggregation function. If the answer is a piece of text the case is unambiguous as aggregation functions only apply to numbers. If the answer is a number but does not appear in the table then we must use some aggregation case. The ambiguous case is when the answer is a number that also appears in the table. In this case we use the aggregation function probabilities predicted by the model to decide whether to select or aggregate. The threshold for this is the hyperparameter *cell_selection_preference*. Args: answer (`tf.Tensor` of shape `(batch_size, )`): Answer for every example in the batch. Nan if there is no scalar answer. pooled_output (`tf.Tensor` of shape `(batch_size, hidden_size)`): Output of the pooler (BertPooler) on top of the encoder layer. cell_selection_preference (`float`): Preference for cell selection in ambiguous cases. labels (`tf.Tensor` of shape `(batch_size, sequence_length)`): Labels per token. aggregation_classifier (`keras.layers.Dense`): Aggregation head. Returns: aggregate_mask (`tf.Tensor` of shape `(batch_size,)`): A mask set to 1 for examples that should use aggregation functions. """ # tf.Tensor(batch_size,) aggregate_mask_init = tf.cast(tf.logical_not(tf.math.is_nan(answer)), tf.float32) logits_aggregation = aggregation_classifier(pooled_output) dist_aggregation = tfp.distributions.Categorical(logits=logits_aggregation) # Index 0 corresponds to "no aggregation". aggregation_ops_total_mass = tf.reduce_sum(dist_aggregation.probs_parameter()[:, 1:], axis=1) # Cell selection examples according to current model.
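# The model is treated as predicting plain cell selection when it puts at most `cell_selection_preference` # probability mass on the aggregation operators (indices 1..num_aggregation_labels - 1).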
is_pred_cell_selection = aggregation_ops_total_mass <= cell_selection_preference # Examples with non-empty cell selection supervision. is_cell_supervision_available = tf.reduce_sum(labels, axis=1) > 0 aggregate_mask = tf.where( tf.logical_and(is_pred_cell_selection, is_cell_supervision_available), tf.zeros_like(aggregate_mask_init, dtype=tf.float32), aggregate_mask_init, ) aggregate_mask = tf.stop_gradient(aggregate_mask) return aggregate_mask def _calculate_aggregation_loss_known( logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels ): """ Calculates aggregation loss when its type is known during training. In the weakly supervised setting, the only known information is that for cell selection examples, "no aggregation" should be predicted. For other examples (those that require aggregation), no loss is accumulated. In the setting where aggregation type is always known, standard cross entropy loss is accumulated for all examples Args: logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. aggregate_mask (`tf.Tensor` of shape `(batch_size, )`): A mask set to 1 for examples that should use aggregation functions. aggregation_labels (`tf.Tensor` of shape `(batch_size, )`): Aggregation function id for every example in the batch. use_answer_as_supervision (`bool`, *optional*): Whether to use the answer as the only supervision for aggregation examples. num_aggregation_labels (`int`, *optional*, defaults to 0): The number of aggregation operators to predict. Returns: aggregation_loss_known (`tf.Tensor` of shape `(batch_size,)`): Aggregation loss (when its type is known during training) per example. """ if use_answer_as_supervision: # Prepare "no aggregation" targets for cell selection examples. target_aggregation = tf.zeros_like(aggregate_mask, dtype=tf.int32) else: # Use aggregation supervision as the target. target_aggregation = aggregation_labels one_hot_labels = tf.one_hot(target_aggregation, depth=num_aggregation_labels, dtype=tf.float32) log_probs = tf.nn.log_softmax(logits_aggregation, axis=-1) # <float32>[batch_size] per_example_aggregation_intermediate = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) if use_answer_as_supervision: # Accumulate loss only for examples requiring cell selection # (no aggregation). return per_example_aggregation_intermediate * (1 - aggregate_mask) else: return per_example_aggregation_intermediate def _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask): """ Calculates aggregation loss in the case of answer supervision. Args: logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. aggregate_mask (`tf.Tensor` of shape `(batch_size, )`): A mask set to 1 for examples that should use aggregation functions Returns: aggregation_loss_unknown (`tf.Tensor` of shape `(batch_size,)`): Aggregation loss (in case of answer supervision) per example. """ dist_aggregation = tfp.distributions.Categorical(logits=logits_aggregation) # Index 0 corresponds to "no aggregation". aggregation_ops_total_mass = tf.reduce_sum(dist_aggregation.probs_parameter()[:, 1:], axis=1) # Predict some aggregation in case of an answer that needs aggregation. # This increases the probability of all aggregation functions, in a way # similar to MML, but without considering whether the function gives the # correct answer. 
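# Illustrative numbers (not from the original implementation): with aggregation probabilities # [0.7 (NONE), 0.2 (SUM), 0.1 (AVERAGE)], the mass on the real operators is 0.3, so an example with # aggregate_mask == 1 incurs a loss of -log(0.3) ~= 1.20 while all other examples incur 0.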
return -tf.math.log(aggregation_ops_total_mass) * aggregate_mask def _calculate_aggregation_loss( logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels, aggregation_loss_weight, ): """ Calculates the aggregation loss per example. Args: logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. aggregate_mask (`tf.Tensor` of shape `(batch_size, )`): A mask set to 1 for examples that should use aggregation functions. aggregation_labels (`tf.Tensor` of shape `(batch_size, )`): Aggregation function id for every example in the batch. use_answer_as_supervision (`bool`, *optional*): Whether to use the answer as the only supervision for aggregation examples. num_aggregation_labels (`int`, *optional*, defaults to 0): The number of aggregation operators to predict. aggregation_loss_weight (`float`, *optional*, defaults to 1.0): Importance weight for the aggregation loss. Returns: aggregation_loss (`tf.Tensor` of shape `(batch_size,)`): Aggregation loss per example. """ per_example_aggregation_loss = _calculate_aggregation_loss_known( logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels ) if use_answer_as_supervision: # Add aggregation loss for numeric answers that need aggregation. per_example_aggregation_loss += _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask) return aggregation_loss_weight * per_example_aggregation_loss def _calculate_expected_result( dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config ): """ Calculates the expected result given cell and aggregation probabilities. Args: dist_per_cell (`tfp.distributions.Bernoulli`): Cell selection distribution for each cell. numeric_values (`tf.Tensor` of shape `(batch_size, seq_length)`): Numeric values of every token. Nan for tokens which are not numeric values. numeric_values_scale (`tf.Tensor` of shape `(batch_size, seq_length)`): Scale of the numeric values of every token. input_mask_float (`tf.Tensor` of shape `(batch_size, seq_length)`): Mask for the table, without question tokens and table headers. logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. config ([`TapasConfig`]): Model configuration class with all the hyperparameters of the model Returns: expected_result (`tf.Tensor` of shape `(batch_size,)`): The expected result per example. """ if config.use_gumbel_for_cells: gumbel_dist = tfp.distributions.RelaxedBernoulli( # The token logits were already divided by the temperature and used for # computing cell selection errors so we need to multiply by it again here config.temperature, logits=dist_per_cell.logits_parameter() * config.temperature, ) scaled_probability_per_cell = gumbel_dist.sample() else: scaled_probability_per_cell = dist_per_cell.probs_parameter() # <float32>[batch_size, seq_length] scaled_probability_per_cell = (scaled_probability_per_cell / numeric_values_scale) * input_mask_float count_result = tf.reduce_sum(scaled_probability_per_cell, axis=1) numeric_values_masked = tf.where( tf.math.is_nan(numeric_values), tf.zeros_like(numeric_values), numeric_values ) # Mask non-numeric table values to zero.
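# The expected SUM below is a probability-weighted sum of the (masked, rescaled) cell values. For AVERAGE, # the exact expectation is intractable, so the RATIO branch approximates it as E[sum] / E[count], while the # FIRST_ORDER and SECOND_ORDER branches additionally correct for the variance of the soft cell count.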
sum_result = tf.reduce_sum(scaled_probability_per_cell * numeric_values_masked, axis=1) avg_approximation = config.average_approximation_function if avg_approximation == AverageApproximationFunction.RATIO: average_result = sum_result / (count_result + EPSILON_ZERO_DIVISION) elif avg_approximation == AverageApproximationFunction.FIRST_ORDER: # The sum of all probabilities except those that correspond to other cells ex = tf.reduce_sum(scaled_probability_per_cell, axis=1, keepdims=True) - scaled_probability_per_cell + 1 average_result = tf.reduce_sum(numeric_values_masked * scaled_probability_per_cell / ex, axis=1) elif avg_approximation == AverageApproximationFunction.SECOND_ORDER: # The sum of all probabilities except those that correspond to other cells ex = tf.reduce_sum(scaled_probability_per_cell, axis=1, keepdims=True) - scaled_probability_per_cell + 1 pointwise_var = scaled_probability_per_cell * (1 - scaled_probability_per_cell) var = tf.reduce_sum(pointwise_var, axis=1, keepdims=True) - pointwise_var multiplier = (var / tf.math.square(ex) + 1) / ex average_result = tf.reduce_sum(numeric_values_masked * scaled_probability_per_cell * multiplier, axis=1) else: raise ValueError(f"Invalid average_approximation_function: {config.average_approximation_function}") if config.use_gumbel_for_aggregation: gumbel_dist = tfp.distributions.RelaxedOneHotCategorical( config.aggregation_temperature, logits=logits_aggregation[:, 1:] ) # <float32>[batch_size, num_aggregation_labels - 1] aggregation_op_only_probs = gumbel_dist.sample() else: # <float32>[batch_size, num_aggregation_labels - 1] aggregation_op_only_probs = stable_softmax(logits_aggregation[:, 1:] / config.aggregation_temperature, axis=-1) all_results = tf.concat( [ tf.expand_dims(sum_result, axis=1), tf.expand_dims(average_result, axis=1), tf.expand_dims(count_result, axis=1), ], axis=1, ) expected_result = tf.reduce_sum(all_results * aggregation_op_only_probs, axis=1) return expected_result def _calculate_regression_loss( answer, aggregate_mask, dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config, ): """ Calculates the regression loss per example. Args: answer (`tf.Tensor` of shape `(batch_size,)`): Answer for every example in the batch. Nan if there is no scalar answer. aggregate_mask (`tf.Tensor` of shape `(batch_size,)`): A mask set to 1 for examples that should use aggregation functions. dist_per_cell (`tfp.distributions.Bernoulli`): Cell selection distribution for each cell. numeric_values (`tf.Tensor` of shape `(batch_size, seq_length)`): Numeric values of every token. Nan for tokens which are not numeric values. numeric_values_scale (`tf.Tensor` of shape `(batch_size, seq_length)`): Scale of the numeric values of every token. input_mask_float (`tf.Tensor` of shape `(batch_size, seq_length)`): Mask for the table, without question tokens and table headers. logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. config ([`TapasConfig`]): Model configuration class with all the parameters of the model Returns: per_example_answer_loss_scaled (`tf.Tensor` of shape `(batch_size,)`): Scaled answer loss for each example in the batch. large_answer_loss_mask (`tf.Tensor` of shape `(batch_size,)`): A mask which is 1 for examples whose answer loss is larger than the answer_loss_cutoff.
""" # float32 (batch_size,) expected_result = _calculate_expected_result( dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config ) # <float32>[batch_size] answer_masked = tf.where(tf.math.is_nan(answer), tf.zeros_like(answer), answer) if config.use_normalized_answer_loss: normalizer = tf.stop_gradient( tf.math.maximum(tf.math.abs(expected_result), tf.math.abs(answer_masked)) + EPSILON_ZERO_DIVISION ) normalized_answer_masked = answer_masked / normalizer normalized_expected_result = expected_result / normalizer per_example_answer_loss = tf.compat.v1.losses.huber_loss( normalized_answer_masked * aggregate_mask, normalized_expected_result * aggregate_mask, delta=tf.cast(1.0, tf.float32), reduction=tf.losses.Reduction.NONE, ) else: per_example_answer_loss = tf.compat.v1.losses.huber_loss( answer_masked * aggregate_mask, expected_result * aggregate_mask, delta=tf.cast(config.huber_loss_delta, tf.float32), reduction=tf.losses.Reduction.NONE, ) if config.answer_loss_cutoff is None: large_answer_loss_mask = tf.ones_like(per_example_answer_loss, dtype=tf.float32) else: large_answer_loss_mask = tf.where( per_example_answer_loss > config.answer_loss_cutoff, tf.zeros_like(per_example_answer_loss, dtype=tf.float32), tf.ones_like(per_example_answer_loss, dtype=tf.float32), ) per_example_answer_loss_scaled = config.answer_loss_importance * (per_example_answer_loss * aggregate_mask) return per_example_answer_loss_scaled, large_answer_loss_mask
transformers/src/transformers/models/tapas/modeling_tf_tapas.py/0
{ "file_path": "transformers/src/transformers/models/tapas/modeling_tf_tapas.py", "repo_id": "transformers", "token_count": 47207 }
399
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for TrOCR. """ import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class TrOCRProcessor(ProcessorMixin): r""" Constructs a TrOCR processor which wraps a vision image processor and a TrOCR tokenizer into a single processor. [`TrOCRProcessor`] offers all the functionalities of [`ViTImageProcessor`/`DeiTImageProcessor`] and [`RobertaTokenizer`/`XLMRobertaTokenizer`]. See the [`~TrOCRProcessor.__call__`] and [`~TrOCRProcessor.decode`] for more information. Args: image_processor ([`ViTImageProcessor`/`DeiTImageProcessor`], *optional*): An instance of [`ViTImageProcessor`/`DeiTImageProcessor`]. The image processor is a required input. tokenizer ([`RobertaTokenizer`/`XLMRobertaTokenizer`], *optional*): An instance of [`RobertaTokenizer`/`XLMRobertaTokenizer`]. The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "AutoImageProcessor" tokenizer_class = "AutoTokenizer" def __init__(self, image_processor=None, tokenizer=None, **kwargs): feature_extractor = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead.", FutureWarning, ) feature_extractor = kwargs.pop("feature_extractor") image_processor = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(image_processor, tokenizer) self.current_processor = self.image_processor self._in_target_context_manager = False def __call__(self, *args, **kwargs): """ When used in normal mode, this method forwards all its arguments to AutoImageProcessor's [`~AutoImageProcessor.__call__`] and returns its output. If used in the context [`~TrOCRProcessor.as_target_processor`] this method forwards all its arguments to TrOCRTokenizer's [`~TrOCRTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information. """ # For backward compatibility if self._in_target_context_manager: return self.current_processor(*args, **kwargs) images = kwargs.pop("images", None) text = kwargs.pop("text", None) if len(args) > 0: images = args[0] args = args[1:] if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process.") if images is not None: inputs = self.image_processor(images, *args, **kwargs) if text is not None: encodings = self.tokenizer(text, **kwargs) if text is None: return inputs elif images is None: return encodings else: inputs["labels"] = encodings["input_ids"] return inputs def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to TrOCRTokenizer's [`~PreTrainedTokenizer.batch_decode`].
Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to TrOCRTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @contextmanager def as_target_processor(self): """ Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning TrOCR. """ warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your images inputs, or in a separate call." ) self._in_target_context_manager = True self.current_processor = self.tokenizer yield self.current_processor = self.image_processor self._in_target_context_manager = False @property def feature_extractor_class(self): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, ) return self.image_processor_class @property def feature_extractor(self): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, ) return self.image_processor
transformers/src/transformers/models/trocr/processing_trocr.py/0
{ "file_path": "transformers/src/transformers/models/trocr/processing_trocr.py", "repo_id": "transformers", "token_count": 2191 }
400
# coding=utf-8 # Copyright 2023 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch UMT5 model.""" import copy import math from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging, replace_return_docstrings, ) from .configuration_umt5 import UMT5Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "UMT5Config" _CHECKPOINT_FOR_DOC = "google/umt5-small" # Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->UMT5 class UMT5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Construct a layernorm module in the UMT5 style. No bias and no subtraction of mean. """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): # UMT5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states # Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->UMT5 class UMT5DenseActDense(nn.Module): def __init__(self, config: UMT5Config): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) if ( isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and self.wo.weight.dtype != torch.int8 ): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->UMT5 class UMT5DenseGatedActDense(nn.Module): def __init__(self, config: UMT5Config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) # To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32. # See https://github.com/huggingface/transformers/issues/20287 # we also make sure the weights are not in `int8` in case users force `_keep_in_fp32_modules` to be `None` if ( isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and self.wo.weight.dtype != torch.int8 ): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->UMT5 class UMT5LayerFF(nn.Module): def __init__(self, config: UMT5Config): super().__init__() if config.is_gated_act: self.DenseReluDense = UMT5DenseGatedActDense(config) else: self.DenseReluDense = UMT5DenseActDense(config) self.layer_norm = UMT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states class UMT5Attention(nn.Module): """ T5's attention using relative_attention_bias.
""" def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() def _shape(self, projection: torch.Tensor) -> torch.Tensor: new_projection_shape = projection.size()[:-1] + (self.n_heads, self.key_value_proj_dim) # move heads to 2nd position (B, T, H * D) -> (B, T, H, D) -> (B, H, T, D) new_projection = projection.view(new_projection_shape).permute(0, 2, 1, 3) return new_projection def _relative_position_bucket(self, relative_position): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on. Args: relative_position: an int32 Tensor. Bidirectionality, the number of buckets and the maximum distance are taken from the module configuration (`self.is_decoder`, `self.relative_attention_num_buckets` and `self.relative_attention_max_distance`). Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 num_buckets = self.relative_attention_num_buckets max_distance = self.relative_attention_max_distance if not self.is_decoder: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance log_ratio = torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) log_ratio = log_ratio * (num_buckets - max_exact) relative_position_if_large = max_exact + log_ratio.to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket(relative_position) values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return values def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, ): is_cross_attention = encoder_hidden_states is not None batch_size, seq_length = hidden_states.shape[:2] # use encoder_hidden_states if cross attention current_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states # checking that the `sequence_length` of the `past_key_value` is the same as the provided # `encoder_hidden_states` to support prefix tuning if is_cross_attention and past_key_value and past_key_value[0].shape[2] == current_states.shape[1]: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] else: key_states = self._shape(self.k(current_states)) value_states = self._shape(self.v(current_states)) if past_key_value is not None and not is_cross_attention: # reuse k, v, self_attention key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) query_states = self._shape(self.q(hidden_states)) attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2)) # compute positional bias if
self.has_relative_attention_bias: query_length = seq_length if past_key_value is not None: query_length += past_key_value[0].shape[2] position_bias = self.compute_bias(query_length, key_states.size(2), device=attention_scores.device) else: position_bias = torch.zeros( (1, self.n_heads, seq_length, key_states.size(2)), device=attention_scores.device, dtype=attention_scores.dtype, requires_grad=self.training, ) if past_key_value is not None: position_bias = position_bias[:, :, -hidden_states.size(1) :, :] if attention_mask is not None: position_bias = position_bias + attention_mask # (batch_size, n_heads, seq_length, key_length) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) attention_scores += position_bias # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.softmax(attention_scores.float(), dim=-1).type_as(attention_scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask # attn_output = torch.bmm(attn_probs, value_states) ? context_states = torch.matmul(attn_weights, value_states) # attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) ? 
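# Merge the heads back: (batch, n_heads, seq_len, head_dim) -> (batch, seq_len, n_heads, head_dim) # -> (batch, seq_len, inner_dim) before the output projection.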
context_states = context_states.permute(0, 2, 1, 3).contiguous().view(batch_size, seq_length, -1) attn_output = self.o(context_states) return attn_output, attn_weights, past_key_value class UMT5LayerSelfAttention(nn.Module): def __init__(self, config): super().__init__() self.SelfAttention = UMT5Attention(config, has_relative_attention_bias=True) self.layer_norm = UMT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, layer_head_mask=None, past_key_value=None, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, past_key_value=past_key_value, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs class UMT5LayerCrossAttention(nn.Module): def __init__(self, config): super().__init__() self.EncDecAttention = UMT5Attention(config, has_relative_attention_bias=False) self.layer_norm = UMT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, encoder_hidden_states=None, attention_mask=None, layer_head_mask=None, past_key_value=None, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, past_key_value=past_key_value, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add attentions if we output them return outputs class UMT5Block(nn.Module): def __init__(self, config): super().__init__() self.is_decoder = config.is_decoder self.layer = nn.ModuleList() self.layer.append(UMT5LayerSelfAttention(config)) if self.is_decoder: self.layer.append(UMT5LayerCrossAttention(config)) self.layer.append(UMT5LayerFF(config)) def forward( self, hidden_states, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None hidden_states, self_attn_weights, present_key_value = self.layer[0]( hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, ) # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: max_dtype = torch.finfo(hidden_states.dtype).max clamp_value = torch.where(torch.isinf(hidden_states).any(), max_dtype - 1000, max_dtype) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.layer[1]( hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, 
layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, ) # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: max_dtype = torch.finfo(hidden_states.dtype).max clamp_value = torch.where(torch.isinf(hidden_states).any(), max_dtype - 1000, max_dtype) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) present_key_value += cross_attn_present_key_value # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: max_dtype = torch.finfo(hidden_states.dtype).max clamp_value = torch.where(torch.isinf(hidden_states).any(), max_dtype - 1000, max_dtype) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = ( hidden_states, present_key_value, ) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs # Copied from transformers.models.t5.modeling_t5.T5ClassificationHead with T5->UMT5 class UMT5ClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config: UMT5Config): super().__init__() self.dense = nn.Linear(config.d_model, config.d_model) self.dropout = nn.Dropout(p=config.classifier_dropout) self.out_proj = nn.Linear(config.d_model, config.num_labels) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states class UMT5PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = UMT5Config base_model_prefix = "transformer" supports_gradient_checkpointing = True _no_split_modules = ["UMT5Block"] _keep_in_fp32_modules = ["wo"] @property def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { "decoder_input_ids": input_ids, "input_ids": input_ids, "decoder_attention_mask": input_mask, } return dummy_inputs def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, UMT5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance( module, ( UMT5Model, UMT5ForConditionalGeneration, UMT5EncoderModel, UMT5ForQuestionAnswering, ), ): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "lm_head") and not self.config.tie_word_embeddings: module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "qa_outputs"): module.qa_outputs.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) module.qa_outputs.bias.data.zero_() elif isinstance(module, UMT5ForTokenClassification): if hasattr(module, "classifier"): module.classifier.weight.data.normal_(mean=0.0, std=factor * 1.0) module.classifier.bias.data.zero_() elif isinstance(module, UMT5ClassificationHead): module.dense.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.dense, "bias") and module.dense.bias is not None: module.dense.bias.data.zero_() module.out_proj.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.out_proj, "bias") and module.out_proj.bias is not None: module.out_proj.bias.data.zero_() elif isinstance(module, UMT5DenseActDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, "bias") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, UMT5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, UMT5Attention): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * 
key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id if decoder_start_token_id is None: raise ValueError( "self.model.config.decoder_start_token_id has to be defined. In UMT5 it is usually set to the pad_token_id. " "See UMT5 docs for more information." ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class UMT5Stack(UMT5PreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens self.is_decoder = config.is_decoder self.block = nn.ModuleList([UMT5Block(config) for i in range(config.num_layers)]) self.final_layer_norm = UMT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Initialize weights and apply final processing self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds") if inputs_embeds is None: if self.embed_tokens is None: raise ValueError("You have to initialize the model with valid token embeddings") inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # 
required mask seq length can be calculated via length of past mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length if use_cache is True: if not self.is_decoder: raise ValueError(f"`use_cache` can only be set to `True` if {self} is used as a decoder") if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None: encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long ) # initialize past_key_values with `None` if past does not exist if past_key_values is None: past_key_values = [None] * len(self.block) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states = () if use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.is_decoder else None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.forward, hidden_states, extended_attention_mask, encoder_hidden_states, encoder_extended_attention_mask, layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value is always None with gradient checkpointing use_cache, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if use_cache: present_key_value_states += (layer_outputs[1],) if output_attentions: all_attentions += (layer_outputs[2],) if self.is_decoder: all_cross_attentions += (layer_outputs[3],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) UMT5_START_DOCSTRING = r""" The UMT5 model was proposed in [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder decoder transformer pre-trained in a text-to-text denoising generative setting. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`UMT5Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
""" UMT5_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. UMT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [UMT5 Training](./umt5#training). attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) UMT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [UMT5 Training](./umt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ UMT5_ENCODER_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. UMT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. To know more on how to prepare `input_ids` for pretraining take a look a [UMT5 Training](./umt5#training). attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare UMT5 Model transformer outputting raw hidden-states without any specific head on top.",
    UMT5_START_DOCSTRING,
)
class UMT5Model(UMT5PreTrainedModel):
    r"""
    Examples:

    ```python
    >>> from transformers import UMT5Model, AutoTokenizer

    >>> model = UMT5Model.from_pretrained("google/umt5-small")
    >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
    >>> noisy_text = "UN Offizier sagt, dass weiter <extra_id_0> werden muss in Syrien."
    >>> label = "<extra_id_0> verhandelt"
    >>> inputs = tokenizer(noisy_text, return_tensors="pt")
    >>> labels = tokenizer(text_target=label, return_tensors="pt")

    >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=labels["input_ids"])
    >>> hidden_states = outputs.last_hidden_state
    ```"""

    model_type = "umt5"
    config_class = UMT5Config
    _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.shared = nn.Embedding(config.vocab_size, config.d_model)

        encoder_config = copy.deepcopy(config)
        encoder_config.is_decoder = False
        encoder_config.use_cache = False
        encoder_config.is_encoder_decoder = False
        self.encoder = UMT5Stack(encoder_config, self.shared)

        decoder_config = copy.deepcopy(config)
        decoder_config.is_decoder = True
        decoder_config.is_encoder_decoder = False
        decoder_config.num_layers = config.num_decoder_layers
        self.decoder = UMT5Stack(decoder_config, self.shared)

        # Initialize weights and apply final processing
        self.post_init()

    # Copied from transformers.models.t5.modeling_t5.T5Model.get_input_embeddings
    def get_input_embeddings(self):
        return self.shared

    # Copied from transformers.models.t5.modeling_t5.T5Model.set_input_embeddings
    def set_input_embeddings(self, new_embeddings):
        self.shared = new_embeddings
        self.encoder.set_input_embeddings(new_embeddings)
        self.decoder.set_input_embeddings(new_embeddings)

    # Copied from transformers.models.t5.modeling_t5.T5Model._tie_weights
    def _tie_weights(self):
        if self.config.tie_word_embeddings:
            self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
            self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared)

    # Copied from transformers.models.t5.modeling_t5.T5Model.get_encoder
    def get_encoder(self):
        return self.encoder

    # Copied from transformers.models.t5.modeling_t5.T5Model.get_decoder
    def get_decoder(self):
        return self.decoder

    # Copied from transformers.models.t5.modeling_t5.T5Model._prune_heads
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model.

        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base class `PreTrainedModel`.
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(UMT5_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        decoder_head_mask: Optional[torch.FloatTensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        decoder_inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
        r"""
        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, UMT5Model

        >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
        >>> model = UMT5Model.from_pretrained("google/umt5-small")

        >>> input_ids = tokenizer(
        ...     "Studies have been shown that owning a dog is good for you", return_tensors="pt"
        ... ).input_ids  # Batch size 1
        >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids  # Batch size 1

        >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for UMT5Model.
        >>> # This is not needed for torch's UMT5ForConditionalGeneration as it does this internally using labels arg.
>>> decoder_input_ids = model._shift_right(decoder_input_ids) >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings("""UMT5 Model with a `language modeling` head on top.""", UMT5_START_DOCSTRING) class UMT5ForConditionalGeneration(UMT5PreTrainedModel): r""" Examples: ```python >>> from transformers import UMT5ForConditionalGeneration, AutoTokenizer >>> model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> summary = "Weiter Verhandlung in Syrien." 
>>> inputs = tokenizer(article, text_target=summary, return_tensors="pt") >>> outputs = model(**inputs) >>> loss = outputs.loss ```""" model_type = "umt5" _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] def __init__(self, config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = UMT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = UMT5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_input_embeddings def get_input_embeddings(self): return self.shared # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration._tie_weights def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.set_output_embeddings def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_output_embeddings def get_output_embeddings(self): return self.lm_head # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_encoder def get_encoder(self): return self.encoder # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_decoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(UMT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. 
All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import AutoTokenizer, UMT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small") >>> model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small") >>> # training >>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids >>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids >>> outputs = model(input_ids=input_ids, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits >>> # inference >>> input_ids = tokenizer("Studies have shown that <extra_id_0> good for you", return_tensors="pt").input_ids >>> outputs = model.generate(input_ids) >>> tokenizer.decode(outputs[0], skip_special_tokens=True) ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Encode if needed (training, first prediction pass) if encoder_outputs is None: # Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) # move labels to correct device to enable PP labels = labels.to(lm_logits.device) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, 
encoder_attentions=encoder_outputs.attentions, ) # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.prepare_inputs_for_generation def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, decoder_attention_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past_key_values is used if past_key_values is not None: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return { "decoder_input_ids": input_ids, "past_key_values": past_key_values, "encoder_outputs": encoder_outputs, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "decoder_attention_mask": decoder_attention_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, } # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.prepare_decoder_input_ids_from_labels def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past @add_start_docstrings( "The bare UMT5 Model transformer outputting encoder's raw hidden-states without any specific head on top.", UMT5_START_DOCSTRING, ) class UMT5EncoderModel(UMT5PreTrainedModel): r""" Examples: ```python >>> from transformers import UMT5EncoderModel, AutoTokenizer >>> model = UMT5EncoderModel.from_pretrained("google/umt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> input_ids = tokenizer(article, return_tensors="pt").input_ids >>> outputs = model(input_ids) >>> hidden_state = outputs.last_hidden_state ```""" model_type = "umt5" # config_class = UMT5Config _tied_weights_keys = ["encoder.embed_tokens.weight"] def __init__(self, config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = UMT5Stack(encoder_config, self.shared) # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.get_input_embeddings def get_input_embeddings(self): return self.shared # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) # Copied from transformers.models.t5.modeling_t5.T5EncoderModel._tie_weights def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.get_encoder def get_encoder(self): return self.encoder # Copied from transformers.models.t5.modeling_t5.T5EncoderModel._prune_heads def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads) @add_start_docstrings_to_model_forward(UMT5_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.forward with T5->UMT5, google-t5/t5-small->google/umt5-small def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, UMT5EncoderModel >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small") >>> model = UMT5EncoderModel.from_pretrained("google/umt5-small") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return encoder_outputs @add_start_docstrings( """ UMT5 model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", UMT5_START_DOCSTRING, ) class UMT5ForSequenceClassification(UMT5PreTrainedModel): _keys_to_ignore_on_load_unexpected = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"] _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] # Copied from transformers.models.t5.modeling_t5.T5ForSequenceClassification.__init__ with T5->UMT5 def __init__(self, config: UMT5Config): super().__init__(config) self.transformer = UMT5Model(config) self.classification_head = UMT5ClassificationHead(config) # Initialize weights and apply final processing self.post_init() self.model_parallel = False @add_start_docstrings_to_model_forward(UMT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Seq2SeqSequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if input_ids is None and inputs_embeds is not None: raise NotImplementedError( f"Passing input embeddings is currently not supported for {self.__class__.__name__}" ) # Copied from models.bart.modeling_bart.BartModel.forward different to other models, T5 automatically creates # decoder_input_ids from input_ids if no decoder_input_ids are provided if decoder_input_ids is None and decoder_inputs_embeds is None: if input_ids is None: raise ValueError( "If no `decoder_input_ids` or `decoder_inputs_embeds` are " "passed, `input_ids` cannot be `None`. Please pass either " "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`." 
) decoder_input_ids = self._shift_right(input_ids) outputs = self.transformer( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] eos_mask = input_ids.eq(self.config.eos_token_id).to(sequence_output.device) if len(torch.unique_consecutive(eos_mask.sum(1))) > 1: raise ValueError("All examples must have the same number of <eos> tokens.") batch_size, _, hidden_size = sequence_output.shape sentence_representation = sequence_output[eos_mask, :].view(batch_size, -1, hidden_size)[:, -1, :] logits = self.classification_head(sentence_representation) loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.config.num_labels == 1: self.config.problem_type = "regression" elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.config.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return Seq2SeqSequenceClassifierOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( """ UMT5 Encoder Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", UMT5_START_DOCSTRING, ) class UMT5ForTokenClassification(UMT5PreTrainedModel): _keys_to_ignore_on_load_unexpected = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"] _tied_weights_keys = ["transformer.encoder.embed_tokens.weight"] # Copied from transformers.models.t5.modeling_t5.T5ForTokenClassification.__init__ with T5->UMT5 def __init__(self, config: UMT5Config): super().__init__(config) self.num_labels = config.num_labels self.transformer = UMT5EncoderModel(config) self.dropout = nn.Dropout(config.classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UMT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) # Copied from transformers.models.t5.modeling_t5.T5ForTokenClassification.forward with T5->UMT5 def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer( input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits, outputs[2:-1]) return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ UMT5 Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", UMT5_START_DOCSTRING, ) class UMT5ForQuestionAnswering(UMT5PreTrainedModel): _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] def __init__(self, config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = UMT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = UMT5Stack(decoder_config, self.shared) self.num_labels = config.num_labels self.qa_outputs = nn.Linear(config.d_model, config.num_labels) # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.get_input_embeddings def get_input_embeddings(self): return self.shared # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering._tie_weights def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.get_encoder def get_encoder(self): return self.encoder # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.get_decoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(UMT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqQuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (*sequence_length*). 
Position outside of the sequence are not taken into account for computing the loss. Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict use_cache = use_cache if use_cache is not None else self.config.use_cache if start_positions is not None and end_positions is not None: use_cache = False # Copied from models.bart.modeling_bart.BartModel.forward # different to other models, T5 automatically creates decoder_input_ids from # input_ids if no decoder_input_ids are provided if decoder_input_ids is None and decoder_inputs_embeds is None: if input_ids is None: raise ValueError( "If no `decoder_input_ids` or `decoder_inputs_embeds` are " "passed, `input_ids` cannot be `None`. Please pass either " "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`." ) decoder_input_ids = self._shift_right(input_ids) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=None, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1).to(start_logits.device) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1).to(end_logits.device) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + decoder_outputs[1:] + encoder_outputs return ((total_loss,) + output) if total_loss is not None else output return Seq2SeqQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, 
cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, )
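

# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A minimal extractive-QA example for `UMT5ForQuestionAnswering`, assuming the public
# "google/umt5-small" checkpoint. Note that the QA head (`qa_outputs`) is freshly
# initialized when loading a base checkpoint, so the decoded span is illustrative only.
if __name__ == "__main__":
    import torch

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
    model = UMT5ForQuestionAnswering.from_pretrained("google/umt5-small")

    question = "Who wrote the report?"
    context = "The report was written by the UN officer."
    inputs = tokenizer(question, context, return_tensors="pt")

    with torch.no_grad():
        # decoder_input_ids are created internally by right-shifting input_ids
        outputs = model(**inputs)

    # Greedily pick the most likely start/end indices and decode that span.
    start = int(outputs.start_logits.argmax(-1))
    end = int(outputs.end_logits.argmax(-1))
    print(tokenizer.decode(inputs["input_ids"][0, start : end + 1], skip_special_tokens=True))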
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """UperNet model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import verify_backbone_config_arguments from ..auto.configuration_auto import CONFIG_MAPPING logger = logging.get_logger(__name__) class UperNetConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`UperNetForSemanticSegmentation`]. It is used to instantiate an UperNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the UperNet [openmmlab/upernet-convnext-tiny](https://huggingface.co/openmmlab/upernet-convnext-tiny) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `ResNetConfig()`): The configuration of the backbone model. backbone (`str`, *optional*): Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. use_pretrained_backbone (`bool`, *optional*, `False`): Whether to use pretrained weights for the backbone. use_timm_backbone (`bool`, *optional*, `False`): Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers library. backbone_kwargs (`dict`, *optional*): Keyword arguments to be passed to AutoBackbone when loading from a checkpoint e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. hidden_size (`int`, *optional*, defaults to 512): The number of hidden units in the convolutional layers. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. pool_scales (`Tuple[int]`, *optional*, defaults to `[1, 2, 3, 6]`): Pooling scales used in Pooling Pyramid Module applied on the last feature map. use_auxiliary_head (`bool`, *optional*, defaults to `True`): Whether to use an auxiliary head during training. auxiliary_loss_weight (`float`, *optional*, defaults to 0.4): Weight of the cross-entropy loss of the auxiliary head. auxiliary_channels (`int`, *optional*, defaults to 256): Number of channels to use in the auxiliary head. auxiliary_num_convs (`int`, *optional*, defaults to 1): Number of convolutional layers to use in the auxiliary head. auxiliary_concat_input (`bool`, *optional*, defaults to `False`): Whether to concatenate the output of the auxiliary head with the input before the classification layer. 
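        auxiliary_in_channels (`int`, *optional*, defaults to 384):
            Number of input channels for the auxiliary head.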
loss_ignore_index (`int`, *optional*, defaults to 255): The index that is ignored by the loss function. Examples: ```python >>> from transformers import UperNetConfig, UperNetForSemanticSegmentation >>> # Initializing a configuration >>> configuration = UperNetConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = UperNetForSemanticSegmentation(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "upernet" def __init__( self, backbone_config=None, backbone=None, use_pretrained_backbone=False, use_timm_backbone=False, backbone_kwargs=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=384, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs, ): super().__init__(**kwargs) if backbone_config is None and backbone is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.") backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"]) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.get("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) verify_backbone_config_arguments( use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs, ) self.backbone_config = backbone_config self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.use_timm_backbone = use_timm_backbone self.backbone_kwargs = backbone_kwargs self.hidden_size = hidden_size self.initializer_range = initializer_range self.pool_scales = pool_scales self.use_auxiliary_head = use_auxiliary_head self.auxiliary_loss_weight = auxiliary_loss_weight self.auxiliary_in_channels = auxiliary_in_channels self.auxiliary_channels = auxiliary_channels self.auxiliary_num_convs = auxiliary_num_convs self.auxiliary_concat_input = auxiliary_concat_input self.loss_ignore_index = loss_ignore_index
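

# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# `backbone_config` may also be passed as a plain dict; `__init__` resolves it through
# `CONFIG_MAPPING` via its "model_type" key. The "convnext" backbone, its stage names,
# and `num_labels=150` are illustrative assumptions, not requirements.
if __name__ == "__main__":
    config = UperNetConfig(
        backbone_config={
            "model_type": "convnext",
            "out_features": ["stage1", "stage2", "stage3", "stage4"],
        },
        auxiliary_in_channels=384,  # channels of the feature map fed to the auxiliary head
        num_labels=150,
    )
    # The dict has been converted to the matching backbone config class.
    print(type(config.backbone_config).__name__)  # ConvNextConfig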
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Image processor class for ViT.""" import functools from typing import Dict, List, Optional, Union from ...image_processing_base import BatchFeature from ...image_processing_utils import get_size_dict from ...image_processing_utils_fast import BaseImageProcessorFast, SizeDict from ...image_transforms import FusedRescaleNormalize, NumpyToTensor, Rescale from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, ImageType, PILImageResampling, get_image_type, make_list_of_images, pil_torch_interpolation_mapping, ) from ...utils import TensorType, logging from ...utils.import_utils import is_torch_available, is_torchvision_available logger = logging.get_logger(__name__) if is_torch_available(): import torch if is_torchvision_available(): from torchvision.transforms import Compose, Normalize, PILToTensor, Resize class ViTImageProcessorFast(BaseImageProcessorFast): r""" Constructs a ViT image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `(size["height"], size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`dict`, *optional*, defaults to `{"height": 224, "width": 224}`): Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. 
""" model_input_names = ["pixel_values"] _transform_params = [ "do_resize", "do_rescale", "do_normalize", "size", "resample", "rescale_factor", "image_mean", "image_std", "image_type", ] def __init__( self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 224, "width": 224} size = get_size_dict(size) self.do_resize = do_resize self.do_rescale = do_rescale self.do_normalize = do_normalize self.size = size self.resample = resample self.rescale_factor = rescale_factor self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD def _build_transforms( self, do_resize: bool, size: Dict[str, int], resample: PILImageResampling, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Union[float, List[float]], image_std: Union[float, List[float]], image_type: ImageType, ) -> "Compose": """ Given the input settings build the image transforms using `torchvision.transforms.Compose`. """ transforms = [] # All PIL and numpy values need to be converted to a torch tensor # to keep cross compatibility with slow image processors if image_type == ImageType.PIL: transforms.append(PILToTensor()) elif image_type == ImageType.NUMPY: transforms.append(NumpyToTensor()) if do_resize: transforms.append( Resize((size["height"], size["width"]), interpolation=pil_torch_interpolation_mapping[resample]) ) # We can combine rescale and normalize into a single operation for speed if do_rescale and do_normalize: transforms.append(FusedRescaleNormalize(image_mean, image_std, rescale_factor=rescale_factor)) elif do_rescale: transforms.append(Rescale(rescale_factor=rescale_factor)) elif do_normalize: transforms.append(Normalize(image_mean, image_std)) return Compose(transforms) @functools.lru_cache(maxsize=1) def _validate_input_arguments( self, return_tensors: Union[str, TensorType], do_resize: bool, size: Dict[str, int], resample: PILImageResampling, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Union[float, List[float]], image_std: Union[float, List[float]], data_format: Union[str, ChannelDimension], image_type: ImageType, ): if return_tensors != "pt": raise ValueError("Only returning PyTorch tensors is currently supported.") if data_format != ChannelDimension.FIRST: raise ValueError("Only channel first data format is currently supported.") if do_resize and None in (size, resample): raise ValueError("Size and resample must be specified if do_resize is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize and None in (image_mean, image_std): raise ValueError("Image mean and standard deviation must be specified if do_normalize is True.") def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: 
Optional[Union[str, TensorType]] = "pt", data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ): """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after resizing. resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`): `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean to use if `do_normalize` is set to `True`. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use if `do_normalize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Only "pt" is supported data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. The following formats are currently supported: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
""" do_resize = do_resize if do_resize is not None else self.do_resize do_rescale = do_rescale if do_rescale is not None else self.do_rescale do_normalize = do_normalize if do_normalize is not None else self.do_normalize resample = resample if resample is not None else self.resample rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size # Make hashable for cache size = SizeDict(**size) image_mean = tuple(image_mean) if isinstance(image_mean, list) else image_mean image_std = tuple(image_std) if isinstance(image_std, list) else image_std images = make_list_of_images(images) image_type = get_image_type(images[0]) if image_type not in [ImageType.PIL, ImageType.TORCH, ImageType.NUMPY]: raise ValueError(f"Unsupported input image type {image_type}") self._validate_input_arguments( do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, return_tensors=return_tensors, data_format=data_format, image_type=image_type, ) transforms = self.get_transforms( do_resize=do_resize, do_rescale=do_rescale, do_normalize=do_normalize, size=size, resample=resample, rescale_factor=rescale_factor, image_mean=image_mean, image_std=image_std, image_type=image_type, ) transformed_images = [transforms(image) for image in images] data = {"pixel_values": torch.stack(transformed_images, dim=0)} return BatchFeature(data, tensor_type=return_tensors)
# Source: transformers/src/transformers/models/vit/image_processing_vit_fast.py
# coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wav2Vec2 model configuration"""

import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class Wav2Vec2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Wav2Vec2Model`]. It is used to instantiate a
    Wav2Vec2 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Wav2Vec2
    [facebook/wav2vec2-base-960h](https://huggingface.co/facebook/wav2vec2-base-960h) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32):
            Vocabulary size of the Wav2Vec2 model. Defines the number of different tokens that can be represented by
            the `input_ids` passed when calling [`Wav2Vec2Model`] or [`TFWav2Vec2Model`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        activation_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for activations inside the fully connected layer.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        final_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for the final projection layer of [`Wav2Vec2ForCTC`].
        layerdrop (`float`, *optional*, defaults to 0.1):
            The LayerDrop probability. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more details.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        feat_extract_norm (`str`, *optional*, defaults to `"group"`):
            The norm to be applied to 1D convolutional layers in the feature encoder. One of `"group"` for group
            normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
            convolutional layers.
        feat_proj_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for the output of the feature encoder.
        feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the 1D convolutional layers of the feature
            extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
        feat_quantizer_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for the output of the feature encoder that's used by the quantizer.
        conv_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
            A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
            feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
        conv_stride (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
            A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
            of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
        conv_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 2, 2)`):
            A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
            length of *conv_kernel* defines the number of convolutional layers and has to match the length of
            *conv_dim*.
        conv_bias (`bool`, *optional*, defaults to `False`):
            Whether the 1D convolutional layers have a bias.
        num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
            Number of convolutional positional embeddings. Defines the kernel size of the 1D convolutional positional
            embeddings layer.
        num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
            Number of groups of the 1D convolutional positional embeddings layer.
        do_stable_layer_norm (`bool`, *optional*, defaults to `False`):
            Whether to apply the *stable* layer norm architecture of the Transformer encoder. `do_stable_layer_norm is
            True` corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is
            False` corresponds to applying layer norm after the attention layer.
        apply_spec_augment (`bool`, *optional*, defaults to `True`):
            Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
            [SpecAugment: A Simple Data Augmentation Method for Automatic Speech
            Recognition](https://arxiv.org/abs/1904.08779).
        mask_time_prob (`float`, *optional*, defaults to 0.05):
            Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
            procedure generates `mask_time_prob * len(time_axis) / mask_time_length` independent masks over the axis.
            If reasoning from the probability of each feature vector to be chosen as the start of the vector span to
            be masked, *mask_time_prob* should be `prob_vector_start * mask_time_length`. Note that overlap may
            decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`.
        mask_time_length (`int`, *optional*, defaults to 10):
            Length of vector span along the time axis.
        mask_time_min_masks (`int`, *optional*, defaults to 2):
            The minimum number of masks of length `mask_time_length` generated along the time axis, each time step,
            irrespectively of `mask_time_prob`. Only relevant if
            `mask_time_prob * len(time_axis) / mask_time_length < mask_time_min_masks`.
        mask_feature_prob (`float`, *optional*, defaults to 0.0):
            Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
            masking procedure generates `mask_feature_prob * len(feature_axis) / mask_feature_length` independent
            masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of
            the vector span to be masked, *mask_feature_prob* should be `prob_vector_start * mask_feature_length`.
            Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if
            `apply_spec_augment is True`.
        mask_feature_length (`int`, *optional*, defaults to 10):
            Length of vector span along the feature axis.
        mask_feature_min_masks (`int`, *optional*, defaults to 0):
            The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
            step, irrespectively of `mask_feature_prob`. Only relevant if
            `mask_feature_prob * len(feature_axis) / mask_feature_length < mask_feature_min_masks`.
        num_codevectors_per_group (`int`, *optional*, defaults to 320):
            Number of entries in each quantization codebook (group).
        num_codevector_groups (`int`, *optional*, defaults to 2):
            Number of codevector groups for product codevector quantization.
        contrastive_logits_temperature (`float`, *optional*, defaults to 0.1):
            The temperature *kappa* in the contrastive loss.
        num_negatives (`int`, *optional*, defaults to 100):
            Number of negative samples for the contrastive loss.
        codevector_dim (`int`, *optional*, defaults to 256):
            Dimensionality of the quantized feature vectors.
        proj_codevector_dim (`int`, *optional*, defaults to 256):
            Dimensionality of the final projection of both the quantized and the transformer features.
        diversity_loss_weight (`float`, *optional*, defaults to 0.1):
            The weight of the codebook diversity loss component.
        ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
            Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
            instance of [`Wav2Vec2ForCTC`].
        ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
            Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
            occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
            of [`Wav2Vec2ForCTC`].
        use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
            Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
            instance of [`Wav2Vec2ForSequenceClassification`].
        classifier_proj_size (`int`, *optional*, defaults to 256):
            Dimensionality of the projection before token mean-pooling for classification.
        tdnn_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`):
            A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN*
            module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers.
tdnn_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`): A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the *XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*. tdnn_dilation (`Tuple[int]` or `List[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`): A tuple of integers defining the dilation factor of each 1D convolutional layer in *TDNN* module of the *XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*. xvector_output_dim (`int`, *optional*, defaults to 512): Dimensionality of the *XVector* embedding vectors. add_adapter (`bool`, *optional*, defaults to `False`): Whether a convolutional network should be stacked on top of the Wav2Vec2 Encoder. Can be very useful for warm-starting Wav2Vec2 for SpeechEncoderDecoder models. adapter_kernel_size (`int`, *optional*, defaults to 3): Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`. adapter_stride (`int`, *optional*, defaults to 2): Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`. num_adapter_layers (`int`, *optional*, defaults to 3): Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is True`. adapter_attn_dim (`int`, *optional*): Dimension of the attention adapter weights to be used in each attention block. An example of a model using attention adapters is [facebook/mms-1b-all](https://huggingface.co/facebook/mms-1b-all). output_hidden_size (`int`, *optional*): Dimensionality of the encoder output layer. If not defined, this defaults to *hidden-size*. Only relevant if `add_adapter is True`. Example: ```python >>> from transformers import Wav2Vec2Config, Wav2Vec2Model >>> # Initializing a Wav2Vec2 facebook/wav2vec2-base-960h style configuration >>> configuration = Wav2Vec2Config() >>> # Initializing a model (with random weights) from the facebook/wav2vec2-base-960h style configuration >>> model = Wav2Vec2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "wav2vec2" def __init__( self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, 
output_hidden_size=None, adapter_attn_dim=None, **kwargs, ): super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id) self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_activation = feat_extract_activation self.conv_dim = list(conv_dim) self.conv_stride = list(conv_stride) self.conv_kernel = list(conv_kernel) self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_feat_extract_layers = len(self.conv_dim) self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.num_attention_heads = num_attention_heads self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.feat_proj_dropout = feat_proj_dropout self.final_dropout = final_dropout self.layerdrop = layerdrop self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.use_weighted_layer_sum = use_weighted_layer_sum if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`," f" `len(config.conv_kernel) = {len(self.conv_kernel)}`." ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 self.apply_spec_augment = apply_spec_augment self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.mask_time_min_masks = mask_time_min_masks self.mask_feature_prob = mask_feature_prob self.mask_feature_length = mask_feature_length self.mask_feature_min_masks = mask_feature_min_masks # parameters for pretraining with codevector quantized representations self.num_codevectors_per_group = num_codevectors_per_group self.num_codevector_groups = num_codevector_groups self.contrastive_logits_temperature = contrastive_logits_temperature self.feat_quantizer_dropout = feat_quantizer_dropout self.num_negatives = num_negatives self.codevector_dim = codevector_dim self.proj_codevector_dim = proj_codevector_dim self.diversity_loss_weight = diversity_loss_weight # ctc loss self.ctc_loss_reduction = ctc_loss_reduction self.ctc_zero_infinity = ctc_zero_infinity # adapter self.add_adapter = add_adapter self.adapter_kernel_size = adapter_kernel_size self.adapter_stride = adapter_stride self.num_adapter_layers = num_adapter_layers self.output_hidden_size = output_hidden_size or hidden_size self.adapter_attn_dim = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. self.classifier_proj_size = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. self.tdnn_dim = list(tdnn_dim) self.tdnn_kernel = list(tdnn_kernel) self.tdnn_dilation = list(tdnn_dilation) self.xvector_output_dim = xvector_output_dim @property def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1)
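# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original module):
# `inputs_to_logits_ratio` is just the product of the conv strides, so with the
# default conv_stride=(5, 2, 2, 2, 2, 2, 2) one encoder frame covers
# 5 * 2**6 = 320 raw samples, i.e. roughly 20 ms of 16 kHz audio per CTC logit.
# (Within this module `Wav2Vec2Config` is defined above; elsewhere import it
# from transformers.)
config = Wav2Vec2Config()
assert config.inputs_to_logits_ratio == 320

num_samples = 16000  # one second of 16 kHz audio
# Approximate frame count; the exact value (49 here) is slightly lower because
# the conv kernels consume a few samples at the edges.
print(num_samples // config.inputs_to_logits_ratio)  # 50
# ---------------------------------------------------------------------------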
# Source: transformers/src/transformers/models/wav2vec2/configuration_wav2vec2.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Wav2Vec2Conformer checkpoint.""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( Wav2Vec2ConformerConfig, Wav2Vec2ConformerForCTC, Wav2Vec2ConformerForPreTraining, Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, logging, ) logging.set_verbosity_info() logger = logging.get_logger(__name__) MAPPING = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k", "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v", "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q", "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u", "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v", "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out", "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos", "self_attn.rotary_emb": "encoder.embed_positions", "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm", "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1", "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2", "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv", "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm", "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm", "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense", "ffn1.w_2": "encoder.layers.*.ffn1.output_dense", "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm", "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense", "ffn2.w_2": "encoder.layers.*.ffn2.output_dense", "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } TOP_LEVEL_KEYS = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def set_recursively(hf_pointer, key, value, full_name, weight_type): for attribute in key.split("."): hf_pointer = getattr(hf_pointer, attribute) if weight_type is not None: hf_shape = getattr(hf_pointer, weight_type).shape else: hf_shape = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": hf_pointer.weight.data = value elif weight_type == "weight_g": hf_pointer.weight_g.data = value elif weight_type == "weight_v": hf_pointer.weight_v.data = value elif weight_type == "bias": hf_pointer.bias.data = value elif weight_type == "running_mean": hf_pointer.running_mean.data = value elif weight_type == "running_var": hf_pointer.running_var.data = value elif weight_type == "num_batches_tracked": hf_pointer.num_batches_tracked.data = value elif weight_type == "inv_freq": hf_pointer.inv_freq.data = value else: hf_pointer.data = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.") def recursively_load_weights(fairseq_model, hf_model, is_headless): unused_weights = [] fairseq_dict = fairseq_model.state_dict() feature_extractor = hf_model.wav2vec2_conformer.feature_extractor for name, value in fairseq_dict.items(): is_used = False if "conv_layers" in name: load_conv_layer( name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", ) is_used = True else: for key, mapped_key in MAPPING.items(): mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: is_used = True if "*" in mapped_key: layer_index = name.split(key)[0].split(".")[-2] mapped_key = mapped_key.replace("*", layer_index) if "pos_bias_u" in name: weight_type = None elif "pos_bias_v" in name: weight_type = None elif "weight_g" in name: weight_type = "weight_g" elif "weight_v" in name: weight_type = "weight_v" elif "bias" in name: weight_type = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj weight_type = "weight" elif "running_mean" in name: weight_type = "running_mean" elif "inv_freq" in name: weight_type = "inv_freq" elif "running_var" in name: weight_type = "running_var" elif "num_batches_tracked" in name: weight_type = "num_batches_tracked" else: weight_type = None set_recursively(hf_model, mapped_key, value, name, weight_type) continue if not is_used: unused_weights.append(name) logger.warning(f"Unused weights: {unused_weights}") # Copied from transformers.models.wav2vec2.convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.load_conv_layer def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm): name = full_name.split("conv_layers.")[-1] items = name.split(".") layer_id = int(items[0]) type_id = int(items[1]) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) feature_extractor.conv_layers[layer_id].conv.bias.data = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." 
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()
recursively_load_weights(model, hf_wav2vec, not is_finetuned) hf_wav2vec.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) args = parser.parse_args() convert_wav2vec2_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
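# ---------------------------------------------------------------------------
# Hypothetical invocation sketch (editor's addition; paths are placeholders,
# not real checkpoints). The flags match the argparse definitions above:
#
#   python convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --dict_path /path/to/dict.ltr.txt
#
# Pass --not_finetuned (and omit --dict_path) to convert a pretraining-only
# checkpoint into a Wav2Vec2ConformerForPreTraining model. The converted
# weights can then be reloaded with, e.g.:
#
#   from transformers import Wav2Vec2ConformerForCTC
#   model = Wav2Vec2ConformerForCTC.from_pretrained("/path/to/output_dir")
# ---------------------------------------------------------------------------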
# Source: transformers/src/transformers/models/wav2vec2_conformer/convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py
# coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF 2.0 XGLM model."""

from __future__ import annotations

import math
import random
from typing import Any, Optional, Tuple, Union

import numpy as np
import tensorflow as tf

from ...activations_tf import get_tf_activation

# Public API
from ...file_utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    replace_return_docstrings,
)
from ...modeling_tf_outputs import TFBaseModelOutputWithPastAndCrossAttentions, TFCausalLMOutputWithCrossAttentions
from ...modeling_tf_utils import (
    TFCausalLanguageModelingLoss,
    TFModelInputType,
    TFPreTrainedModel,
    TFSharedEmbeddings,
    get_initializer,
    keras,
    keras_serializable,
    unpack_inputs,
)
from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
from ...utils import logging
from .configuration_xglm import XGLMConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "facebook/xglm-564M"
_CONFIG_FOR_DOC = "XGLMConfig"


LARGE_NEGATIVE = -1e8


def create_sinusoidal_positions(num_positions: int, embedding_dim: int, padding_idx: Optional[int]) -> tf.Tensor:
    half_dim = embedding_dim // 2
    emb = math.log(10000) / (half_dim - 1)
    emb = tf.exp(tf.range(half_dim, dtype=tf.float32) * -emb)
    emb = tf.expand_dims(tf.range(num_positions, dtype=tf.float32), axis=1) * tf.expand_dims(emb, axis=0)
    emb = tf.reshape(tf.concat([tf.sin(emb), tf.cos(emb)], axis=1), (num_positions, -1))
    if embedding_dim % 2 == 1:
        # zero pad
        emb = tf.concat([emb, tf.zeros((num_positions, 1))], axis=1)
    if padding_idx is not None:
        _padding_mask = tf.concat(
            [
                tf.ones((padding_idx, shape_list(emb)[1])),
                tf.zeros((1, shape_list(emb)[1])),
                tf.ones((shape_list(emb)[0] - padding_idx - 1, shape_list(emb)[1])),
            ],
            axis=0,
        )
        emb *= _padding_mask

    return tf.constant(emb, name="embed_positions")


def _create_position_ids_from_input_ids(
    input_ids: tf.Tensor, past_key_values_length: int, padding_idx: Optional[int]
) -> tf.Tensor:
    """
    Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
    are ignored. This is modified from fairseq's `utils.make_positions`.
    """
    # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
    mask = tf.where(input_ids != padding_idx, 1, 0)
    incremental_indices = (tf.cast(tf.cumsum(mask, axis=1), dtype=mask.dtype) + past_key_values_length) * mask
    return tf.cast(incremental_indices, dtype=tf.int64) + padding_idx


def _create_position_ids_from_inputs_embeds(
    inputs_embeds: tf.Tensor, past_key_values_length: int, padding_idx: Optional[int]
) -> tf.Tensor:
    """
    We are provided embeddings directly. We cannot infer which tokens are padded, so we just generate sequential
    position ids.

    Args:
inputs_embeds: tf.Tensor Returns: tf.Tensor """ input_shape = shape_list(inputs_embeds)[:-1] sequence_length = input_shape[1] position_ids = tf.range(padding_idx + 1, sequence_length + padding_idx + 1, dtype=tf.int64) return tf.broadcast_to(tf.expand_dims(position_ids, axis=0), input_shape) + past_key_values_length # Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0): """ Make causal mask used for bi-directional self-attention. """ bsz = input_ids_shape[0] tgt_len = input_ids_shape[1] mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE mask_cond = tf.range(shape_list(mask)[-1]) mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask) if past_key_values_length > 0: mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1) return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1)) # Copied from transformers.models.bart.modeling_tf_bart._expand_mask def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->XGLM class TFXGLMAttention(keras.layers.Layer): """Multi-headed attention from "Attention Is All You Need""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, **kwargs, ): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = keras.layers.Dropout(dropout) self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj") self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj") self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj") self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj") def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) def call( self, hidden_states: tf.Tensor, key_value_states: tf.Tensor | None = None, past_key_value: Tuple[Tuple[tf.Tensor]] | None = None, attention_mask: tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, training: Optional[bool] = False, ) -> Tuple[tf.Tensor, tf.Tensor | None]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = shape_list(hidden_states) # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = tf.concat([past_key_value[0], key_states], axis=2) value_states = tf.concat([past_key_value[1], value_states], axis=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) key_states = tf.reshape(key_states, proj_shape) value_states = tf.reshape(value_states, proj_shape) src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) tf.debugging.assert_equal( shape_list(attn_weights), [bsz * self.num_heads, tgt_len, src_len], message=( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {shape_list(attn_weights)}" ), ) if attention_mask is not None: tf.debugging.assert_equal( shape_list(attention_mask), [bsz, 1, tgt_len, src_len], message=( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" {shape_list(attention_mask)}" ), ) attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: tf.debugging.assert_equal( shape_list(layer_head_mask), [self.num_heads], message=( f"Head mask for a single layer should be of size {(self.num_heads)}, but is" f" {shape_list(layer_head_mask)}" ), ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) ) attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) tf.debugging.assert_equal( shape_list(attn_output), [bsz * self.num_heads, tgt_len, self.head_dim], message=( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {shape_list(attn_output)}" ), ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) ) attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) attn_output = self.out_proj(attn_output) attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) return attn_output, attn_weights, past_key_value def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "k_proj", None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.embed_dim]) if getattr(self, "q_proj", None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.embed_dim]) if getattr(self, "v_proj", None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.embed_dim]) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.embed_dim]) class TFXGLMDecoderLayer(keras.layers.Layer): def __init__(self, config: XGLMConfig, **kwargs: Any) -> None: super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFXGLMAttention( embed_dim=self.embed_dim, num_heads=config.attention_heads, dropout=config.attention_dropout, is_decoder=True, name="self_attn", ) self.dropout = keras.layers.Dropout(config.dropout) self.activation_fn = 
get_tf_activation(config.activation_function) self.activation_dropout = keras.layers.Dropout(config.activation_dropout) if config.add_cross_attention: self.encoder_attn = TFXGLMAttention( embed_dim=self.embed_dim, num_heads=config.attention_heads, dropout=config.attention_dropout, is_decoder=True, name="encoder_attn", ) self.encoder_attn_layer_norm = keras.layers.LayerNormalization( epsilon=1e-5, name="encoder_attn_layer_norm" ) self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") self.fc1 = keras.layers.Dense(config.ffn_dim, name="fc1") self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2") self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") self.config = config # Copied from transformers.models.mbart.modeling_tf_mbart.TFMBartDecoderLayer.call def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None = None, encoder_hidden_states: tf.Tensor | None = None, encoder_attention_mask: tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, cross_attn_layer_head_mask: tf.Tensor | None = None, past_key_value: Tuple[tf.Tensor] | None = None, training: Optional[bool] = False, ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]: """ Args: hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)* attention_mask (`tf.Tensor`): attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. encoder_hidden_states (`tf.Tensor`): cross attention input to the layer of shape *(batch, seq_len, embed_dim)* encoder_attention_mask (`tf.Tensor`): encoder attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size *(decoder_attention_heads,)* cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module. 
*(decoder_attention_heads,)* past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states return ( hidden_states, self_attn_weights, cross_attn_weights, present_key_value, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attn", None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, "self_attn_layer_norm", None) is not None: with tf.name_scope(self.self_attn_layer_norm.name): self.self_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, "fc1", None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.embed_dim]) if getattr(self, "fc2", None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build([None, None, self.config.ffn_dim]) if getattr(self, "final_layer_norm", None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim]) if getattr(self, "encoder_attn", None) is not None: with tf.name_scope(self.encoder_attn.name): self.encoder_attn.build(None) if getattr(self, "encoder_attn_layer_norm", None) is not None: with tf.name_scope(self.encoder_attn_layer_norm.name): self.encoder_attn_layer_norm.build([None, None, self.embed_dim]) @keras_serializable class TFXGLMMainLayer(keras.layers.Layer): config_class = XGLMConfig def __init__( self, config: XGLMConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, *inputs, **kwargs: Any ) -> None: super().__init__(*inputs, **kwargs) self.config = config self.padding_idx = config.pad_token_id self.max_target_positions = 
config.max_position_embeddings self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = TFSharedEmbeddings( config.vocab_size, config.d_model, self.padding_idx, name="embed_tokens" ) self.offset = 2 self._embed_positions_weights = create_sinusoidal_positions( num_positions=config.max_position_embeddings + self.offset, embedding_dim=config.d_model, padding_idx=config.pad_token_id, ) self.dropout = keras.layers.Dropout(config.dropout) self.layers = [TFXGLMDecoderLayer(config, name=f"layers.{i}") for i in range(config.num_layers)] self.layerdrop = config.layerdrop self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm") def get_input_embeddings(self) -> TFSharedEmbeddings: return self.embed_tokens def set_input_embeddings(self, value: TFSharedEmbeddings) -> None: self.embed_tokens = value def _prepare_decoder_attention_mask( self, attention_mask: tf.Tensor | None, input_shape: tf.TensorShape, past_key_values_length: int, ) -> tf.Tensor: # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length) combined_attention_mask = tf.cond( input_shape[-1] > 1, lambda: combined_attention_mask, lambda: tf.ones_like(combined_attention_mask) ) if attention_mask is None: return combined_attention_mask expand_attention_mask = _expand_mask(attention_mask, tgt_len=input_shape[-1]) return expand_attention_mask + combined_attention_mask def embed_positions(self, position_ids: np.ndarray | tf.Tensor | None = None) -> tf.Tensor: position_ids += self.offset positions = tf.gather(self._embed_positions_weights, position_ids, axis=0) return positions @unpack_inputs def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, cross_attn_head_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, **kwargs: Any, ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = tf.shape(input_ids) input_ids = tf.reshape(input_ids, (-1, input_shape[-1])) elif inputs_embeds is not None: input_shape = tf.shape(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if 
position_ids is None: position_ids = tf.expand_dims( tf.range(past_key_values_length, input_shape[-1] + past_key_values_length), axis=0 ) position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.embed_tokens.vocab_size) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale attention_mask = self._prepare_decoder_attention_mask(attention_mask, input_shape, past_key_values_length) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1]) # embed positions positions = self.embed_positions(position_ids) hidden_states = tf.cast(inputs_embeds, dtype=tf.float32) + positions hidden_states = self.dropout(hidden_states, training=training) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]: if attn_mask is not None: tf.debugging.assert_equal( shape_list(attn_mask)[0], len(self.layers), message=( f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for" f" {shape_list(attn_mask)[0]}." ), ) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if training and (dropout_probability < self.layerdrop): continue past_key_value = past_key_values[idx] if past_key_values is not None else None hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None), past_key_value=past_key_value, ) if use_cache: next_decoder_cache += (present_key_value,) if output_attentions: all_self_attns += (layer_self_attn,) if encoder_hidden_states is not None: all_cross_attentions += (layer_cross_attn,) hidden_states = self.layer_norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.d_model]) if getattr(self, "embed_tokens", None) is not None: with tf.name_scope(self.embed_tokens.name): 
self.embed_tokens.build(None)
        if getattr(self, "layers", None) is not None:
            for layer in self.layers:
                with tf.name_scope(layer.name):
                    layer.build(None)


class TFXGLMPreTrainedModel(TFPreTrainedModel):
    config_class = XGLMConfig
    base_model_prefix = "model"


XGLM_START_DOCSTRING = r"""
    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
    behavior.

    <Tip>

    TensorFlow models and layers in `transformers` accept two formats as input:

    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.

    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
    positional argument:

    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
    `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated to the input names given in the docstring:
    `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`

    Note that when creating models and layers with
    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
    about any of this, as you can just pass inputs like you would to any other Python function!

    </Tip>

    Args:
        config ([`XGLMConfig`]): Model configuration class with all the parameters of the model. Initializing with a
            config file does not load the weights associated with the model, only the configuration. Check out the
            [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

XGLM_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`tf.Tensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.
            [What are position IDs?](../glossary#position-ids)
        encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of
            the decoder.
        encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
            Mask to avoid performing cross-attention on padding token indices of encoder input_ids. Mask values
            selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        head_mask (`tf.Tensor` of shape `(num_layers, attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        cross_attn_head_mask (`tf.Tensor` of shape `(num_layers, attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.num_layers`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
            decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead
            of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*, defaults to `True`):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`). Set to `False` during training, `True` during generation.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
            config will be used instead.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
            used instead.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
            eager mode, in graph mode the value will always be set to True.
        training (`bool`, *optional*, defaults to `False`):
            Whether or not to use the model in training mode (some modules like dropout modules have different
            behaviors between training and evaluation).
"""


@add_start_docstrings(
    "The bare XGLM Model transformer outputting raw hidden-states without any specific head on top.",
    XGLM_START_DOCSTRING,
)
class TFXGLMModel(TFXGLMPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_layers* layers.
    Each layer is a [`TFXGLMDecoderLayer`].

    Args:
        config: XGLMConfig
        embed_tokens: [TFSharedEmbeddings]: output embedding
    """

    def __init__(
        self, config: XGLMConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, *inputs: Any, **kwargs: Any
    ) -> None:
        super().__init__(config, *inputs, **kwargs)

        self.model = TFXGLMMainLayer(config, embed_tokens=embed_tokens, name="model")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(XGLM_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPastAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
        encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        cross_attn_head_mask: np.ndarray | tf.Tensor | None = None,
        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
        **kwargs: Any,
    ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,  # forward position_ids so they are not silently dropped
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            head_mask=head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "model", None) is not None:
            with tf.name_scope(self.model.name):
                self.model.build(None)


@add_start_docstrings(
    """
    The XGLM Model transformer with a language modeling head on top (linear layer with weights tied to the input
    embeddings).
""", XGLM_START_DOCSTRING, ) class TFXGLMForCausalLM(TFXGLMPreTrainedModel, TFCausalLanguageModelingLoss): base_model_prefix = "model" _keys_to_ignore_on_load_missing = [ r"model.embed_positions.weights", r"lm_head.weight", ] _keys_to_ignore_on_save = [ r"model.embed_positions.weights", ] def __init__( self, config: XGLMConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, *inputs: Any, **kwargs: Any ) -> None: super().__init__(config, *inputs, **kwargs) self.model = TFXGLMMainLayer(config, embed_tokens=embed_tokens, name="model") self.lm_head = keras.layers.Dense( config.vocab_size, use_bias=False, kernel_initializer=get_initializer(config.init_std), name="lm_head", ) self.config = config def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs): # only last token for inputs_ids if past is defined in kwargs if past_key_values: inputs = tf.expand_dims(inputs[:, -1], -1) position_ids = kwargs.get("position_ids", None) attention_mask = kwargs.get("attention_mask", None) if attention_mask is not None and position_ids is None: position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True) if past_key_values: position_ids = tf.expand_dims(position_ids[:, -1], -1) return { "input_ids": inputs, "attention_mask": attention_mask, "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": use_cache, } @unpack_inputs @add_start_docstrings_to_model_forward(XGLM_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, cross_attn_head_mask: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, labels: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, **kwargs: Any, ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]: r""" labels (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
            you can set `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels
            set to `-100` are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        """

        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            head_mask=head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        hidden_states = outputs[0]
        lm_logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # shift labels to the left and cut last logit token
            labels = tf.concat(
                [labels[:, 1:], tf.fill((labels.shape[0], 1), tf.cast(self.config.pad_token_id, labels.dtype))],
                axis=-1,
            )
            loss = self.hf_compute_loss(labels, lm_logits)

        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFCausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "model", None) is not None:
            with tf.name_scope(self.model.name):
                self.model.build(None)
        if getattr(self, "lm_head", None) is not None:
            with tf.name_scope(self.lm_head.name):
                self.lm_head.build([None, None, self.config.hidden_size])

    def tf_to_pt_weight_rename(self, tf_weight):
        if tf_weight == "lm_head.weight":
            return tf_weight, "model.embed_tokens.weight"
        else:
            return (tf_weight,)
transformers/src/transformers/models/xglm/modeling_tf_xglm.py/0
{ "file_path": "transformers/src/transformers/models/xglm/modeling_tf_xglm.py", "repo_id": "transformers", "token_count": 19987 }
406
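In `prepare_inputs_for_generation` above, `tf.math.cumsum(attention_mask, axis=-1, exclusive=True)` derives position ids by counting the non-padding tokens that precede each slot. A minimal standalone sketch (the mask values below are made up, not tied to any checkpoint):

```python
import tensorflow as tf

# Left-padded batch: 0 = padding, 1 = real token (illustrative values only).
attention_mask = tf.constant([[0, 0, 1, 1, 1],
                              [1, 1, 1, 1, 1]])

# The exclusive cumulative sum counts non-padding tokens *before* each slot,
# so real tokens receive positions 0, 1, 2, ... regardless of left padding.
position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True)
print(position_ids.numpy())
# [[0 0 0 1 2]
#  [0 1 2 3 4]]
```

Padding slots also receive ids, but they are masked out of attention, so their positions never matter.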
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for XLM-RoBERTa model."""

import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_xlm_roberta import XLMRobertaTokenizer
else:
    XLMRobertaTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}


class XLMRobertaTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" XLM-RoBERTa tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from
    [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
    [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        additional_special_tokens (`List[str]`, *optional*):
            Additional special tokens used by the tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = XLMRobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file

    @property
    def can_save_slow_tokenizer(self) -> bool:
        return os.path.isfile(self.vocab_file) if self.vocab_file else False

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. An XLM-RoBERTa sequence has the following format:

        - single sequence: `<s> X </s>`
        - pair of sequences: `<s> A </s></s> B </s>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """

        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa
        does not make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """

        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
transformers/src/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py/0
{ "file_path": "transformers/src/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py", "repo_id": "transformers", "token_count": 3195 }
407
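The `<s> A </s></s> B </s>` layout documented in `build_inputs_with_special_tokens` above is easy to verify in isolation. A minimal sketch with placeholder token ids (they stand in for `<s>` and `</s>`, not XLM-R's real vocabulary):

```python
from typing import List, Optional

CLS_ID, SEP_ID = 0, 2  # placeholder ids for <s> and </s>

def build_inputs(token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
    if token_ids_1 is None:
        return [CLS_ID] + token_ids_0 + [SEP_ID]  # <s> X </s>
    # A pair of sequences uses a *doubled* separator between A and B.
    return [CLS_ID] + token_ids_0 + [SEP_ID, SEP_ID] + token_ids_1 + [SEP_ID]

print(build_inputs([10, 11]))        # [0, 10, 11, 2]
print(build_inputs([10, 11], [20]))  # [0, 10, 11, 2, 2, 20, 2]
```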
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import subprocess
import sys
import warnings
from argparse import ArgumentParser
from pathlib import Path

from packaging import version

from .. import AutoFeatureExtractor, AutoImageProcessor, AutoProcessor, AutoTokenizer
from ..utils import logging
from ..utils.import_utils import is_optimum_available
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import get_preprocessor


MIN_OPTIMUM_VERSION = "1.5.0"

ENCODER_DECODER_MODELS = ["vision-encoder-decoder"]


def export_with_optimum(args):
    if is_optimum_available():
        from optimum.version import __version__ as optimum_version

        parsed_optimum_version = version.parse(optimum_version)
        if parsed_optimum_version < version.parse(MIN_OPTIMUM_VERSION):
            raise RuntimeError(
                f"transformers.onnx requires optimum >= {MIN_OPTIMUM_VERSION} but {optimum_version} is installed. You "
                "can upgrade optimum by running: pip install -U optimum[exporters]"
            )
    else:
        raise RuntimeError(
            "transformers.onnx requires optimum to run, you can install the library by running: pip install "
            "optimum[exporters]"
        )
    # Build the argument list with each flag and its value as separate argv entries so
    # that the spawned interpreter's argument parser can consume them.
    cmd_line = [
        sys.executable,
        "-m",
        "optimum.exporters.onnx",
        "--model",
        args.model,
        "--task",
        args.feature,
    ]
    if args.framework is not None:
        cmd_line += ["--framework", args.framework]
    cmd_line.append(str(args.output))
    proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE)
    proc.wait()

    logger.info(
        "The export was done by optimum.exporters.onnx. We recommend using this package directly in the future, as "
        "transformers.onnx is deprecated, and will be removed in v5. You can find more information here: "
        "https://huggingface.co/docs/optimum/exporters/onnx/usage_guides/export_a_model."
    )


def export_with_transformers(args):
    args.output = args.output if args.output.is_file() else args.output.joinpath("model.onnx")
    if not args.output.parent.exists():
        args.output.parent.mkdir(parents=True)

    # Allocate the model
    model = FeaturesManager.get_model_from_feature(
        args.feature, args.model, framework=args.framework, cache_dir=args.cache_dir
    )

    model_kind, model_onnx_config = FeaturesManager.check_supported_model_or_raise(model, feature=args.feature)
    onnx_config = model_onnx_config(model.config)

    if model_kind in ENCODER_DECODER_MODELS:
        encoder_model = model.get_encoder()
        decoder_model = model.get_decoder()

        encoder_onnx_config = onnx_config.get_encoder_config(encoder_model.config)
        decoder_onnx_config = onnx_config.get_decoder_config(
            encoder_model.config, decoder_model.config, feature=args.feature
        )

        if args.opset is None:
            args.opset = max(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset)

        if args.opset < min(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset):
            raise ValueError(
                f"Opset {args.opset} is not sufficient to export {model_kind}. At least "
                f"{min(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset)} is required."
) preprocessor = AutoFeatureExtractor.from_pretrained(args.model) onnx_inputs, onnx_outputs = export( preprocessor, encoder_model, encoder_onnx_config, args.opset, args.output.parent.joinpath("encoder_model.onnx"), ) validate_model_outputs( encoder_onnx_config, preprocessor, encoder_model, args.output.parent.joinpath("encoder_model.onnx"), onnx_outputs, args.atol if args.atol else encoder_onnx_config.atol_for_validation, ) preprocessor = AutoTokenizer.from_pretrained(args.model) onnx_inputs, onnx_outputs = export( preprocessor, decoder_model, decoder_onnx_config, args.opset, args.output.parent.joinpath("decoder_model.onnx"), ) validate_model_outputs( decoder_onnx_config, preprocessor, decoder_model, args.output.parent.joinpath("decoder_model.onnx"), onnx_outputs, args.atol if args.atol else decoder_onnx_config.atol_for_validation, ) logger.info( f"All good, model saved at: {args.output.parent.joinpath('encoder_model.onnx').as_posix()}," f" {args.output.parent.joinpath('decoder_model.onnx').as_posix()}" ) else: # Instantiate the appropriate preprocessor if args.preprocessor == "auto": preprocessor = get_preprocessor(args.model) elif args.preprocessor == "tokenizer": preprocessor = AutoTokenizer.from_pretrained(args.model) elif args.preprocessor == "image_processor": preprocessor = AutoImageProcessor.from_pretrained(args.model) elif args.preprocessor == "feature_extractor": preprocessor = AutoFeatureExtractor.from_pretrained(args.model) elif args.preprocessor == "processor": preprocessor = AutoProcessor.from_pretrained(args.model) else: raise ValueError(f"Unknown preprocessor type '{args.preprocessor}'") # Ensure the requested opset is sufficient if args.opset is None: args.opset = onnx_config.default_onnx_opset if args.opset < onnx_config.default_onnx_opset: raise ValueError( f"Opset {args.opset} is not sufficient to export {model_kind}. " f"At least {onnx_config.default_onnx_opset} is required." ) onnx_inputs, onnx_outputs = export( preprocessor, model, onnx_config, args.opset, args.output, ) if args.atol is None: args.atol = onnx_config.atol_for_validation validate_model_outputs(onnx_config, preprocessor, model, args.output, onnx_outputs, args.atol) logger.info(f"All good, model saved at: {args.output.as_posix()}") warnings.warn( "The export was done by transformers.onnx which is deprecated and will be removed in v5. We recommend" " using optimum.exporters.onnx in future. You can find more information here:" " https://huggingface.co/docs/optimum/exporters/onnx/usage_guides/export_a_model.", FutureWarning, ) def main(): parser = ArgumentParser("Hugging Face Transformers ONNX exporter") parser.add_argument( "-m", "--model", type=str, required=True, help="Model ID on huggingface.co or path on disk to load model from." ) parser.add_argument( "--feature", default="default", help="The type of features to export the model with.", ) parser.add_argument("--opset", type=int, default=None, help="ONNX opset version to export the model with.") parser.add_argument( "--atol", type=float, default=None, help="Absolute difference tolerance when validating the model." ) parser.add_argument( "--framework", type=str, choices=["pt", "tf"], default=None, help=( "The framework to use for the ONNX export." " If not provided, will attempt to use the local checkpoint's original framework" " or what is available in the environment." 
), ) parser.add_argument("output", type=Path, help="Path indicating where to store generated ONNX model.") parser.add_argument("--cache_dir", type=str, default=None, help="Path indicating where to store cache.") parser.add_argument( "--preprocessor", type=str, choices=["auto", "tokenizer", "feature_extractor", "image_processor", "processor"], default="auto", help="Which type of preprocessor to use. 'auto' tries to automatically detect it.", ) parser.add_argument( "--export_with_transformers", action="store_true", help=( "Whether to use transformers.onnx instead of optimum.exporters.onnx to perform the ONNX export. It can be " "useful when exporting a model supported in transformers but not in optimum, otherwise it is not " "recommended." ), ) args = parser.parse_args() if args.export_with_transformers or not is_optimum_available(): export_with_transformers(args) else: export_with_optimum(args) if __name__ == "__main__": logger = logging.get_logger("transformers.onnx") # pylint: disable=invalid-name logger.setLevel(logging.INFO) main()
transformers/src/transformers/onnx/__main__.py/0
{ "file_path": "transformers/src/transformers/onnx/__main__.py", "repo_id": "transformers", "token_count": 3988 }
408
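The `MIN_OPTIMUM_VERSION` check above goes through `packaging.version` because plain string comparison misorders versions. A standalone sketch with a made-up installed version:

```python
from packaging import version

# Lexicographic comparison gets multi-digit components wrong...
assert "1.10.0" < "1.9.0"
# ...while parsed versions compare semantically.
assert version.parse("1.10.0") > version.parse("1.9.0")

MIN_OPTIMUM_VERSION = "1.5.0"
installed = "1.4.2"  # made-up version string for illustration
needs_upgrade = version.parse(installed) < version.parse(MIN_OPTIMUM_VERSION)
print(f"upgrade required: {needs_upgrade}")  # True
```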
from typing import List, Union import numpy as np from ..utils import ( ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import Pipeline, build_pipeline_init_args if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES logger = logging.get_logger(__name__) # Copied from transformers.pipelines.text_classification.sigmoid def sigmoid(_outputs): return 1.0 / (1.0 + np.exp(-_outputs)) # Copied from transformers.pipelines.text_classification.softmax def softmax(_outputs): maxes = np.max(_outputs, axis=-1, keepdims=True) shifted_exp = np.exp(_outputs - maxes) return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True) # Copied from transformers.pipelines.text_classification.ClassificationFunction class ClassificationFunction(ExplicitEnum): SIGMOID = "sigmoid" SOFTMAX = "softmax" NONE = "none" @add_end_docstrings( build_pipeline_init_args(has_image_processor=True), r""" function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output.""", ) class ImageClassificationPipeline(Pipeline): """ Image classification pipeline using any `AutoModelForImageClassification`. This pipeline predicts the class of an image. Example: ```python >>> from transformers import pipeline >>> classifier = pipeline(model="microsoft/beit-base-patch16-224-pt22k-ft22k") >>> classifier("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png") [{'score': 0.442, 'label': 'macaw'}, {'score': 0.088, 'label': 'popinjay'}, {'score': 0.075, 'label': 'parrot'}, {'score': 0.073, 'label': 'parodist, lampooner'}, {'score': 0.046, 'label': 'poll, poll_parrot'}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This image classification pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"image-classification"`. See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=image-classification). 
""" function_to_apply: ClassificationFunction = ClassificationFunction.NONE def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) requires_backends(self, "vision") self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) def _sanitize_parameters(self, top_k=None, function_to_apply=None, timeout=None): preprocess_params = {} if timeout is not None: preprocess_params["timeout"] = timeout postprocess_params = {} if top_k is not None: postprocess_params["top_k"] = top_k if isinstance(function_to_apply, str): function_to_apply = ClassificationFunction(function_to_apply.lower()) if function_to_apply is not None: postprocess_params["function_to_apply"] = function_to_apply return preprocess_params, {}, postprocess_params def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs): """ Assign labels to the image(s) passed as inputs. Args: images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): The pipeline handles three types of images: - A string containing a http link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images, which must then be passed as a string. Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL images. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: If this argument is not specified, then it will apply the following functions according to the number of labels: - If the model has a single label, will apply the sigmoid function on the output. - If the model has several labels, will apply the softmax function on the output. Possible values are: - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. top_k (`int`, *optional*, defaults to 5): The number of top labels that will be returned by the pipeline. If the provided number is higher than the number of labels available in the model configuration, it will default to the number of labels. timeout (`float`, *optional*, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever. Return: A dictionary or a list of dictionaries containing result. If the input is a single image, will return a dictionary, if the input is a list of several images, will return a list of dictionaries corresponding to the images. The dictionaries contain the following keys: - **label** (`str`) -- The label identified by the model. - **score** (`int`) -- The score attributed by the model for that label. 
""" return super().__call__(images, **kwargs) def preprocess(self, image, timeout=None): image = load_image(image, timeout=timeout) model_inputs = self.image_processor(images=image, return_tensors=self.framework) if self.framework == "pt": model_inputs = model_inputs.to(self.torch_dtype) return model_inputs def _forward(self, model_inputs): model_outputs = self.model(**model_inputs) return model_outputs def postprocess(self, model_outputs, function_to_apply=None, top_k=5): if function_to_apply is None: if self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels == 1: function_to_apply = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels > 1: function_to_apply = ClassificationFunction.SOFTMAX elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None: function_to_apply = self.model.config.function_to_apply else: function_to_apply = ClassificationFunction.NONE if top_k > self.model.config.num_labels: top_k = self.model.config.num_labels outputs = model_outputs["logits"][0] if self.framework == "pt" and outputs.dtype in (torch.bfloat16, torch.float16): outputs = outputs.to(torch.float32).numpy() else: outputs = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: scores = sigmoid(outputs) elif function_to_apply == ClassificationFunction.SOFTMAX: scores = softmax(outputs) elif function_to_apply == ClassificationFunction.NONE: scores = outputs else: raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}") dict_scores = [ {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores) ] dict_scores.sort(key=lambda x: x["score"], reverse=True) if top_k is not None: dict_scores = dict_scores[:top_k] return dict_scores
transformers/src/transformers/pipelines/image_classification.py/0
{ "file_path": "transformers/src/transformers/pipelines/image_classification.py", "repo_id": "transformers", "token_count": 3522 }
409
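Stripped of framework details, the `postprocess` method above applies a score function to the logits, attaches labels, sorts, and truncates to `top_k`. A self-contained sketch with made-up logits and a made-up label map:

```python
import numpy as np

def softmax(x: np.ndarray) -> np.ndarray:
    shifted = np.exp(x - np.max(x, axis=-1, keepdims=True))  # subtract max for numerical stability
    return shifted / shifted.sum(axis=-1, keepdims=True)

logits = np.array([2.0, 0.5, 1.0])                 # illustrative model output
id2label = {0: "macaw", 1: "parrot", 2: "toucan"}  # illustrative label map

scores = softmax(logits)
dict_scores = [{"label": id2label[i], "score": float(s)} for i, s in enumerate(scores)]
dict_scores.sort(key=lambda x: x["score"], reverse=True)
print(dict_scores[:2])  # top_k = 2
```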
from typing import List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import Pipeline, build_pipeline_init_args


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES
    from .pt_utils import KeyDataset

logger = logging.get_logger(__name__)


@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True, has_image_processor=True))
class VisualQuestionAnsweringPipeline(Pipeline):
    """
    Visual Question Answering pipeline using an `AutoModelForVisualQuestionAnswering`. This pipeline is currently only
    available in PyTorch.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> oracle = pipeline(model="dandelin/vilt-b32-finetuned-vqa")
    >>> image_url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/lena.png"
    >>> oracle(question="What is she wearing ?", image=image_url)
    [{'score': 0.948, 'answer': 'hat'}, {'score': 0.009, 'answer': 'fedora'}, {'score': 0.003, 'answer': 'clothes'}, {'score': 0.003, 'answer': 'sun hat'}, {'score': 0.002, 'answer': 'nothing'}]

    >>> oracle(question="What is she wearing ?", image=image_url, top_k=1)
    [{'score': 0.948, 'answer': 'hat'}]

    >>> oracle(question="Is this a person ?", image=image_url, top_k=1)
    [{'score': 0.993, 'answer': 'yes'}]

    >>> oracle(question="Is this a man ?", image=image_url, top_k=1)
    [{'score': 0.996, 'answer': 'no'}]
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    This visual question answering pipeline can currently be loaded from [`pipeline`] using the following task
    identifiers: `"visual-question-answering", "vqa"`.

    The models that this pipeline can use are models that have been fine-tuned on a visual question answering task.
    See the up-to-date list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=visual-question-answering).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, timeout=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if timeout is not None:
            preprocess_params["timeout"] = timeout
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(
        self,
        image: Union["Image.Image", str, List["Image.Image"], List[str], "KeyDataset"],
        question: Union[str, List[str]] = None,
        **kwargs,
    ):
        r"""
        Answers open-ended questions about images. The pipeline accepts several types of inputs which are detailed
        below:

        - `pipeline(image=image, question=question)`
        - `pipeline({"image": image, "question": question})`
        - `pipeline([{"image": image, "question": question}])`
        - `pipeline([{"image": image, "question": question}, {"image": image, "question": question}])`

        Args:
            image (`str`, `List[str]`, `PIL.Image`, `List[PIL.Image]` or `KeyDataset`):
                The pipeline handles three types of images:

                - A string containing a http link pointing to an image
                - A string containing a local path to an image
                - An image loaded in PIL directly

                The pipeline accepts either a single image or a batch of images. If given a single image, it can be
                broadcasted to multiple questions.
                For datasets: the dataset passed in must be of type `transformers.pipelines.pt_utils.KeyDataset`
                Example:
                ```python
                >>> from transformers.pipelines.pt_utils import KeyDataset
                >>> from datasets import load_dataset

                >>> dataset = load_dataset("detection-datasets/coco")
                >>> oracle(image=KeyDataset(dataset, "image"), question="What's in this image?")
                ```

            question (`str`, `List[str]`):
                The question(s) asked. If given a single question, it can be broadcasted to multiple images. If
                multiple images and questions are given, each and every question will be broadcasted to all images
                (same effect as a Cartesian product).
            top_k (`int`, *optional*, defaults to 5):
                The number of top labels that will be returned by the pipeline. If the provided number is higher than
                the number of labels available in the model configuration, it will default to the number of labels.
            timeout (`float`, *optional*, defaults to None):
                The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
                the call may block forever.

        Return:
            A dictionary or a list of dictionaries containing the result. The dictionaries contain the following keys:

            - **label** (`str`) -- The label identified by the model.
            - **score** (`float`) -- The score attributed by the model for that label.
        """
        is_dataset = isinstance(image, KeyDataset)
        is_image_batch = isinstance(image, list) and all(isinstance(item, (Image.Image, str)) for item in image)
        is_question_batch = isinstance(question, list) and all(isinstance(item, str) for item in question)

        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        elif (is_image_batch or is_dataset) and isinstance(question, str):
            inputs = [{"image": im, "question": question} for im in image]
        elif isinstance(image, (Image.Image, str)) and is_question_batch:
            inputs = [{"image": image, "question": q} for q in question]
        elif (is_image_batch or is_dataset) and is_question_batch:
            question_image_pairs = []
            for q in question:
                for im in image:
                    question_image_pairs.append({"image": im, "question": q})
            inputs = question_image_pairs
        else:
            """
            Supports the following format
            - {"image": image, "question": question}
            - [{"image": image, "question": question}]
            - Generator and datasets
            """
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False, timeout=None):
        image = load_image(inputs["image"], timeout=timeout)
        model_inputs = self.tokenizer(
            inputs["question"],
            return_tensors=self.framework,
            padding=padding,
            truncation=truncation,
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        if self.framework == "pt":
            image_features = image_features.to(self.torch_dtype)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.model.can_generate():
            model_outputs = self.model.generate(**model_inputs, **generate_kwargs)
        else:
            model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if self.model.can_generate():
            return [
                {"answer": self.tokenizer.decode(output_ids, skip_special_tokens=True).strip()}
                for output_ids in model_outputs
            ]
        else:
            if top_k > self.model.config.num_labels:
                top_k = self.model.config.num_labels

            if self.framework == "pt":
                probs = model_outputs.logits.sigmoid()[0]
                scores, ids = probs.topk(top_k)
            else:
                raise ValueError(f"Unsupported framework: {self.framework}")

            scores = scores.tolist()
            ids = ids.tolist()
            return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
transformers/src/transformers/pipelines/visual_question_answering.py/0
{ "file_path": "transformers/src/transformers/pipelines/visual_question_answering.py", "repo_id": "transformers", "token_count": 3576 }
410
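When both arguments are batches, `__call__` above pairs every question with every image. A standalone sketch with placeholder paths and questions, mirroring the nested loops in the batch branch:

```python
images = ["a.png", "b.png"]                          # placeholder image paths
questions = ["What is shown?", "How many objects?"]  # placeholder questions

# Outer loop over questions, inner loop over images -- a Cartesian product,
# exactly like the `question_image_pairs` construction in `__call__`.
inputs = [{"image": im, "question": q} for q in questions for im in images]
assert len(inputs) == len(images) * len(questions)  # 4 pairs
```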
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.metadata
from typing import TYPE_CHECKING, Optional

from packaging import version

from .base import HfQuantizer


if TYPE_CHECKING:
    from ..modeling_utils import PreTrainedModel

from ..utils import is_auto_gptq_available, is_optimum_available, is_torch_available, logging
from ..utils.quantization_config import GPTQConfig, QuantizationConfigMixin


if is_torch_available():
    import torch

logger = logging.get_logger(__name__)


class GptqHfQuantizer(HfQuantizer):
    """
    Quantizer of the GPTQ method - for GPTQ the quantizer supports calibration of the model through the `auto_gptq`
    package. Quantization is done under the hood for users if they load a non-prequantized model.
    """

    requires_calibration = False
    required_packages = ["optimum", "auto_gptq"]
    optimum_quantizer = None

    def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
        super().__init__(quantization_config, **kwargs)
        from optimum.gptq import GPTQQuantizer

        self.optimum_quantizer = GPTQQuantizer.from_dict(self.quantization_config.to_dict_optimum())

    def validate_environment(self, *args, **kwargs):
        gptq_supports_cpu = version.parse(importlib.metadata.version("auto-gptq")) > version.parse("0.4.2")
        if not gptq_supports_cpu and not torch.cuda.is_available():
            raise RuntimeError("A GPU is required to quantize or run a quantized model.")
        elif not (is_optimum_available() and is_auto_gptq_available()):
            raise ImportError(
                "Loading a GPTQ quantized model requires optimum (`pip install optimum`) and auto-gptq library (`pip install auto-gptq`)"
            )
        elif version.parse(importlib.metadata.version("auto_gptq")) < version.parse("0.4.2"):
            raise ImportError(
                "You need a version of auto_gptq >= 0.4.2 to use GPTQ: `pip install --upgrade auto-gptq`"
            )

    def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
        if torch_dtype is None:
            torch_dtype = torch.float16
        elif torch_dtype != torch.float16:
            logger.info("We suggest you set `torch_dtype=torch.float16` for better efficiency with GPTQ.")
        return torch_dtype

    def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs):
        if model.__class__.main_input_name != "input_ids":
            raise RuntimeError("We can only quantize pure text models.")

        if self.pre_quantized:
            model = self.optimum_quantizer.convert_model(model)

    def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
        if self.pre_quantized:
            model = self.optimum_quantizer.post_init_model(model)
        else:
            if self.quantization_config.tokenizer is None:
                self.quantization_config.tokenizer = model.name_or_path

            self.optimum_quantizer.quantize_model(model, self.quantization_config.tokenizer)
            model.config.quantization_config = GPTQConfig.from_dict(self.optimum_quantizer.to_dict())

    @property
    def is_trainable(self, model: Optional["PreTrainedModel"] = None):
        return True

    @property
    def is_serializable(self):
        return True
transformers/src/transformers/quantizers/quantizer_gptq.py/0
{ "file_path": "transformers/src/transformers/quantizers/quantizer_gptq.py", "repo_id": "transformers", "token_count": 1447 }
411
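For orientation, a hedged sketch of how this quantizer is typically reached from the user-facing API. The model id and calibration settings are placeholders, and both `optimum` and `auto-gptq` must be installed (plus a GPU for calibration) for the call to succeed:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

model_id = "facebook/opt-125m"  # placeholder model id
tokenizer = AutoTokenizer.from_pretrained(model_id)

# On-the-fly calibration path (`requires_calibration = False` above):
quant_config = GPTQConfig(bits=4, dataset="c4", tokenizer=tokenizer)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    quantization_config=quant_config,  # routed through GptqHfQuantizer
)
```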
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Callbacks to use with the Trainer class and customize the training loop.
"""

import dataclasses
import json
from dataclasses import dataclass
from typing import Dict, List, Optional, Union

import numpy as np
from tqdm.auto import tqdm

from .trainer_utils import IntervalStrategy, has_length
from .training_args import TrainingArguments
from .utils import logging


logger = logging.get_logger(__name__)


@dataclass
class TrainerState:
    """
    A class containing the [`Trainer`] inner state that will be saved along the model and optimizer when checkpointing
    and passed to the [`TrainerCallback`].

    <Tip>

    In all this class, one step is to be understood as one update step. When using gradient accumulation, one update
    step may require several forward and backward passes: if you use `gradient_accumulation_steps=n`, then one update
    step requires going through *n* batches.

    </Tip>

    Args:
        epoch (`float`, *optional*):
            Only set during training, will represent the epoch the training is at (the decimal part being the
            percentage of the current epoch completed).
        global_step (`int`, *optional*, defaults to 0):
            During training, represents the number of update steps completed.
        max_steps (`int`, *optional*, defaults to 0):
            The number of update steps to do during the current training.
        logging_steps (`int`, *optional*, defaults to 500):
            Log every X update steps.
        eval_steps (`int`, *optional*):
            Run an evaluation every X steps.
        save_steps (`int`, *optional*, defaults to 500):
            Save checkpoint every X update steps.
        train_batch_size (`int`, *optional*):
            The batch size for the training dataloader. Only needed when `auto_find_batch_size` has been used.
        num_input_tokens_seen (`int`, *optional*, defaults to 0):
            The number of tokens seen during training (number of input tokens, not the number of prediction tokens).
        total_flos (`float`, *optional*, defaults to 0):
            The total number of floating operations done by the model since the beginning of training (stored as
            floats to avoid overflow).
        log_history (`List[Dict[str, float]]`, *optional*):
            The list of logs done since the beginning of training.
        best_metric (`float`, *optional*):
            When tracking the best model, the value of the best metric encountered so far.
        best_model_checkpoint (`str`, *optional*):
            When tracking the best model, the value of the name of the checkpoint for the best model encountered so
            far.
        is_local_process_zero (`bool`, *optional*, defaults to `True`):
            Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on
            several machines) main process.
        is_world_process_zero (`bool`, *optional*, defaults to `True`):
            Whether or not this process is the global main process (when training in a distributed fashion on several
            machines, this is only going to be `True` for one process).
        is_hyper_param_search (`bool`, *optional*, defaults to `False`):
            Whether we are in the process of a hyper parameter search using Trainer.hyperparameter_search.
            This will impact the way data will be logged in TensorBoard.
        stateful_callbacks (`List[StatefulTrainerCallback]`, *optional*):
            Callbacks attached to the `Trainer` that should have their states be saved or restored. Relevant callbacks
            should implement a `state` and `from_state` function.
    """

    epoch: Optional[float] = None
    global_step: int = 0
    max_steps: int = 0
    logging_steps: int = 500
    eval_steps: int = 500
    save_steps: int = 500
    train_batch_size: int = None
    num_train_epochs: int = 0
    num_input_tokens_seen: int = 0
    total_flos: float = 0
    log_history: List[Dict[str, float]] = None
    best_metric: Optional[float] = None
    best_model_checkpoint: Optional[str] = None
    is_local_process_zero: bool = True
    is_world_process_zero: bool = True
    is_hyper_param_search: bool = False
    trial_name: str = None
    trial_params: Dict[str, Union[str, float, int, bool]] = None
    stateful_callbacks: List["TrainerCallback"] = None

    def __post_init__(self):
        if self.log_history is None:
            self.log_history = []
        if self.stateful_callbacks is None:
            self.stateful_callbacks = {}
        elif isinstance(self.stateful_callbacks, dict):
            # We are loading the callbacks in from the state file, no need to process them
            pass
        else:
            # Saveable callbacks get stored as dict of kwargs
            stateful_callbacks = {}
            for callback in self.stateful_callbacks:
                if not isinstance(callback, ExportableState):
                    raise TypeError(
                        f"All callbacks passed to be saved must inherit `ExportableState`, but received {type(callback)}"
                    )
                name = callback.__class__.__name__
                if name in stateful_callbacks:
                    # We can have multiple versions of the same callback
                    # if so, we store them as a list of states to restore
                    if not isinstance(stateful_callbacks[name], list):
                        stateful_callbacks[name] = [stateful_callbacks[name]]
                    stateful_callbacks[name].append(callback.state())
                else:
                    stateful_callbacks[name] = callback.state()
            self.stateful_callbacks = stateful_callbacks

    def save_to_json(self, json_path: str):
        """Save the content of this instance in JSON format inside `json_path`."""
        json_string = json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + "\n"
        with open(json_path, "w", encoding="utf-8") as f:
            f.write(json_string)

    @classmethod
    def load_from_json(cls, json_path: str):
        """Create an instance from the content of `json_path`."""
        with open(json_path, "r", encoding="utf-8") as f:
            text = f.read()
        return cls(**json.loads(text))


class ExportableState:
    """
    A class for objects that include the ability to have its state be saved during `Trainer._save_checkpoint` and
    loaded back in during `Trainer._load_from_checkpoint`.

    These must implement a `state` function that gets called during the respective Trainer function call. It should
    only include parameters and attributes needed to recreate the state at a particular time, to avoid utilizing
    pickle/maintain standard file IO writing.

    Example:

    ```python
    class EarlyStoppingCallback(TrainerCallback, ExportableState):
        def __init__(self, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0):
            self.early_stopping_patience = early_stopping_patience
            self.early_stopping_threshold = early_stopping_threshold
            # early_stopping_patience_counter denotes the number of times validation metrics failed to improve.
self.early_stopping_patience_counter = 0 def state(self) -> dict: return { "args": { "early_stopping_patience": self.early_stopping_patience, "early_stopping_threshold": self.early_stopping_threshold, }, "attributes": { "early_stopping_patience_counter": self.early_stopping_patience_counter, } } ```""" def state(self) -> dict: raise NotImplementedError("You must implement a `state` function to utilize this class.") @classmethod def from_state(cls, state): instance = cls(**state["args"]) for k, v in state["attributes"].items(): setattr(instance, k, v) return instance @dataclass class TrainerControl(ExportableState): """ A class that handles the [`Trainer`] control flow. This class is used by the [`TrainerCallback`] to activate some switches in the training loop. Args: should_training_stop (`bool`, *optional*, defaults to `False`): Whether or not the training should be interrupted. If `True`, this variable will not be set back to `False`. The training will just stop. should_epoch_stop (`bool`, *optional*, defaults to `False`): Whether or not the current epoch should be interrupted. If `True`, this variable will be set back to `False` at the beginning of the next epoch. should_save (`bool`, *optional*, defaults to `False`): Whether or not the model should be saved at this step. If `True`, this variable will be set back to `False` at the beginning of the next step. should_evaluate (`bool`, *optional*, defaults to `False`): Whether or not the model should be evaluated at this step. If `True`, this variable will be set back to `False` at the beginning of the next step. should_log (`bool`, *optional*, defaults to `False`): Whether or not the logs should be reported at this step. If `True`, this variable will be set back to `False` at the beginning of the next step. """ should_training_stop: bool = False should_epoch_stop: bool = False should_save: bool = False should_evaluate: bool = False should_log: bool = False def _new_training(self): """Internal method that resets the variable for a new training.""" self.should_training_stop = False def _new_epoch(self): """Internal method that resets the variable for a new epoch.""" self.should_epoch_stop = False def _new_step(self): """Internal method that resets the variable for a new step.""" self.should_save = False self.should_evaluate = False self.should_log = False def state(self) -> dict: return { "args": { "should_training_stop": self.should_training_stop, "should_epoch_stop": self.should_epoch_stop, "should_save": self.should_save, "should_evaluate": self.should_evaluate, "should_log": self.should_log, }, "attributes": {}, } class TrainerCallback: # no-format """ A class for objects that will inspect the state of the training loop at some events and take some decisions. At each of those events the following arguments are available: Args: args ([`TrainingArguments`]): The training arguments used to instantiate the [`Trainer`]. state ([`TrainerState`]): The current state of the [`Trainer`]. control ([`TrainerControl`]): The object that is returned to the [`Trainer`] and can be used to make some decisions. model ([`PreTrainedModel`] or `torch.nn.Module`): The model being trained. tokenizer ([`PreTrainedTokenizer`]): The tokenizer used for encoding the data. optimizer (`torch.optim.Optimizer`): The optimizer used for the training steps. lr_scheduler (`torch.optim.lr_scheduler.LambdaLR`): The scheduler used for setting the learning rate. train_dataloader (`torch.utils.data.DataLoader`, *optional*): The current dataloader used for training. 
@dataclass
class TrainerControl(ExportableState):
    """
    A class that handles the [`Trainer`] control flow. This class is used by the [`TrainerCallback`] to activate some
    switches in the training loop.

    Args:
        should_training_stop (`bool`, *optional*, defaults to `False`):
            Whether or not the training should be interrupted.

            If `True`, this variable will not be set back to `False`. The training will just stop.
        should_epoch_stop (`bool`, *optional*, defaults to `False`):
            Whether or not the current epoch should be interrupted.

            If `True`, this variable will be set back to `False` at the beginning of the next epoch.
        should_save (`bool`, *optional*, defaults to `False`):
            Whether or not the model should be saved at this step.

            If `True`, this variable will be set back to `False` at the beginning of the next step.
        should_evaluate (`bool`, *optional*, defaults to `False`):
            Whether or not the model should be evaluated at this step.

            If `True`, this variable will be set back to `False` at the beginning of the next step.
        should_log (`bool`, *optional*, defaults to `False`):
            Whether or not the logs should be reported at this step.

            If `True`, this variable will be set back to `False` at the beginning of the next step.
    """

    should_training_stop: bool = False
    should_epoch_stop: bool = False
    should_save: bool = False
    should_evaluate: bool = False
    should_log: bool = False

    def _new_training(self):
        """Internal method that resets the variable for a new training."""
        self.should_training_stop = False

    def _new_epoch(self):
        """Internal method that resets the variable for a new epoch."""
        self.should_epoch_stop = False

    def _new_step(self):
        """Internal method that resets the variable for a new step."""
        self.should_save = False
        self.should_evaluate = False
        self.should_log = False

    def state(self) -> dict:
        return {
            "args": {
                "should_training_stop": self.should_training_stop,
                "should_epoch_stop": self.should_epoch_stop,
                "should_save": self.should_save,
                "should_evaluate": self.should_evaluate,
                "should_log": self.should_log,
            },
            "attributes": {},
        }
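# Callbacks drive the training loop purely by flipping the flags above and handing the
# modified `control` back to the `Trainer`. As an illustration, a hedged sketch of a
# callback that requests a checkpoint whenever the logged loss dips below a made-up
# threshold (it subclasses the `TrainerCallback` base class defined just below):

class SaveOnLowLossCallback(TrainerCallback):
    """Sketch: ask the Trainer to save whenever the logged loss drops below 0.1."""

    def on_log(self, args, state, control, logs=None, **kwargs):
        if logs is not None and logs.get("loss", float("inf")) < 0.1:  # illustrative threshold
            control.should_save = True
        return control  # returning control hands the change back to the Trainer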
""" pass class CallbackHandler(TrainerCallback): """Internal class that just calls the list of callbacks in order.""" def __init__(self, callbacks, model, tokenizer, optimizer, lr_scheduler): self.callbacks = [] for cb in callbacks: self.add_callback(cb) self.model = model self.tokenizer = tokenizer self.optimizer = optimizer self.lr_scheduler = lr_scheduler self.train_dataloader = None self.eval_dataloader = None if not any(isinstance(cb, DefaultFlowCallback) for cb in self.callbacks): logger.warning( "The Trainer will not work properly if you don't have a `DefaultFlowCallback` in its callbacks. You\n" + "should add one before training with `trainer.add_callback(DefaultFlowCallback). The current list of" + "callbacks is\n:" + self.callback_list ) def add_callback(self, callback): cb = callback() if isinstance(callback, type) else callback cb_class = callback if isinstance(callback, type) else callback.__class__ if cb_class in [c.__class__ for c in self.callbacks]: logger.warning( f"You are adding a {cb_class} to the callbacks of this Trainer, but there is already one. The current" + "list of callbacks is\n:" + self.callback_list ) self.callbacks.append(cb) def pop_callback(self, callback): if isinstance(callback, type): for cb in self.callbacks: if isinstance(cb, callback): self.callbacks.remove(cb) return cb else: for cb in self.callbacks: if cb == callback: self.callbacks.remove(cb) return cb def remove_callback(self, callback): if isinstance(callback, type): for cb in self.callbacks: if isinstance(cb, callback): self.callbacks.remove(cb) return else: self.callbacks.remove(callback) @property def callback_list(self): return "\n".join(cb.__class__.__name__ for cb in self.callbacks) def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_init_end", args, state, control) def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): control.should_training_stop = False return self.call_event("on_train_begin", args, state, control) def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_train_end", args, state, control) def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): control.should_epoch_stop = False return self.call_event("on_epoch_begin", args, state, control) def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_epoch_end", args, state, control) def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): control.should_log = False control.should_evaluate = False control.should_save = False return self.call_event("on_step_begin", args, state, control) def on_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_optimizer_step", args, state, control) def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_substep_end", args, state, control) def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl): return self.call_event("on_step_end", args, state, control) def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics): control.should_evaluate = False return self.call_event("on_evaluate", args, state, control, metrics=metrics) def on_predict(self, args: TrainingArguments, 
class CallbackHandler(TrainerCallback):
    """Internal class that just calls the list of callbacks in order."""

    def __init__(self, callbacks, model, tokenizer, optimizer, lr_scheduler):
        self.callbacks = []
        for cb in callbacks:
            self.add_callback(cb)
        self.model = model
        self.tokenizer = tokenizer
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        self.train_dataloader = None
        self.eval_dataloader = None

        if not any(isinstance(cb, DefaultFlowCallback) for cb in self.callbacks):
            logger.warning(
                "The Trainer will not work properly if you don't have a `DefaultFlowCallback` in its callbacks. You\n"
                + "should add one before training with `trainer.add_callback(DefaultFlowCallback)`. The current list"
                + " of callbacks is\n:"
                + self.callback_list
            )

    def add_callback(self, callback):
        cb = callback() if isinstance(callback, type) else callback
        cb_class = callback if isinstance(callback, type) else callback.__class__
        if cb_class in [c.__class__ for c in self.callbacks]:
            logger.warning(
                f"You are adding a {cb_class} to the callbacks of this Trainer, but there is already one. The current"
                + " list of callbacks is\n:"
                + self.callback_list
            )
        self.callbacks.append(cb)

    def pop_callback(self, callback):
        if isinstance(callback, type):
            for cb in self.callbacks:
                if isinstance(cb, callback):
                    self.callbacks.remove(cb)
                    return cb
        else:
            for cb in self.callbacks:
                if cb == callback:
                    self.callbacks.remove(cb)
                    return cb

    def remove_callback(self, callback):
        if isinstance(callback, type):
            for cb in self.callbacks:
                if isinstance(cb, callback):
                    self.callbacks.remove(cb)
                    return
        else:
            self.callbacks.remove(callback)

    @property
    def callback_list(self):
        return "\n".join(cb.__class__.__name__ for cb in self.callbacks)

    def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        return self.call_event("on_init_end", args, state, control)

    def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        control.should_training_stop = False
        return self.call_event("on_train_begin", args, state, control)

    def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        return self.call_event("on_train_end", args, state, control)

    def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        control.should_epoch_stop = False
        return self.call_event("on_epoch_begin", args, state, control)

    def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        return self.call_event("on_epoch_end", args, state, control)

    def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        control.should_log = False
        control.should_evaluate = False
        control.should_save = False
        return self.call_event("on_step_begin", args, state, control)

    def on_optimizer_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        return self.call_event("on_optimizer_step", args, state, control)

    def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        return self.call_event("on_substep_end", args, state, control)

    def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        return self.call_event("on_step_end", args, state, control)

    def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics):
        control.should_evaluate = False
        return self.call_event("on_evaluate", args, state, control, metrics=metrics)

    def on_predict(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics):
        return self.call_event("on_predict", args, state, control, metrics=metrics)

    def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        control.should_save = False
        return self.call_event("on_save", args, state, control)

    def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, logs):
        control.should_log = False
        return self.call_event("on_log", args, state, control, logs=logs)

    def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        return self.call_event("on_prediction_step", args, state, control)

    def call_event(self, event, args, state, control, **kwargs):
        for callback in self.callbacks:
            result = getattr(callback, event)(
                args,
                state,
                control,
                model=self.model,
                tokenizer=self.tokenizer,
                optimizer=self.optimizer,
                lr_scheduler=self.lr_scheduler,
                train_dataloader=self.train_dataloader,
                eval_dataloader=self.eval_dataloader,
                **kwargs,
            )
            # A Callback can skip the return of `control` if it doesn't change it.
            if result is not None:
                control = result
        return control
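# Users normally reach the handler indirectly: `Trainer` exposes thin wrappers around it.
# A hedged sketch of that public surface (`trainer` is assumed to be an existing Trainer):

from transformers import PrinterCallback

trainer.add_callback(PrinterCallback)            # passing a class instantiates it for you
printer = trainer.pop_callback(PrinterCallback)  # detach the instance and get it back
trainer.add_callback(printer)                    # instances are accepted as-is
trainer.remove_callback(PrinterCallback)         # detach without returning it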
""" def __init__(self): self.training_bar = None self.prediction_bar = None def on_train_begin(self, args, state, control, **kwargs): if state.is_world_process_zero: self.training_bar = tqdm(total=state.max_steps, dynamic_ncols=True) self.current_step = 0 def on_step_end(self, args, state, control, **kwargs): if state.is_world_process_zero: self.training_bar.update(state.global_step - self.current_step) self.current_step = state.global_step def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs): if state.is_world_process_zero and has_length(eval_dataloader): if self.prediction_bar is None: self.prediction_bar = tqdm( total=len(eval_dataloader), leave=self.training_bar is None, dynamic_ncols=True ) self.prediction_bar.update(1) def on_evaluate(self, args, state, control, **kwargs): if state.is_world_process_zero: if self.prediction_bar is not None: self.prediction_bar.close() self.prediction_bar = None def on_predict(self, args, state, control, **kwargs): if state.is_world_process_zero: if self.prediction_bar is not None: self.prediction_bar.close() self.prediction_bar = None def on_log(self, args, state, control, logs=None, **kwargs): if state.is_world_process_zero and self.training_bar is not None: # make a shallow copy of logs so we can mutate the fields copied # but avoid doing any value pickling. shallow_logs = {} for k, v in logs.items(): shallow_logs[k] = v _ = shallow_logs.pop("total_flos", None) # round numbers so that it looks better in console if "epoch" in shallow_logs: shallow_logs["epoch"] = round(shallow_logs["epoch"], 2) self.training_bar.write(str(shallow_logs)) def on_train_end(self, args, state, control, **kwargs): if state.is_world_process_zero: self.training_bar.close() self.training_bar = None class PrinterCallback(TrainerCallback): """ A bare [`TrainerCallback`] that just prints the logs. """ def on_log(self, args, state, control, logs=None, **kwargs): _ = logs.pop("total_flos", None) if state.is_local_process_zero: print(logs) class EarlyStoppingCallback(TrainerCallback, ExportableState): """ A [`TrainerCallback`] that handles early stopping. Args: early_stopping_patience (`int`): Use with `metric_for_best_model` to stop training when the specified metric worsens for `early_stopping_patience` evaluation calls. early_stopping_threshold(`float`, *optional*): Use with TrainingArguments `metric_for_best_model` and `early_stopping_patience` to denote how much the specified metric must improve to satisfy early stopping conditions. ` This callback depends on [`TrainingArguments`] argument *load_best_model_at_end* functionality to set best_metric in [`TrainerState`]. Note that if the [`TrainingArguments`] argument *save_steps* differs from *eval_steps*, the early stopping will not occur until the next save step. """ def __init__(self, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0): self.early_stopping_patience = early_stopping_patience self.early_stopping_threshold = early_stopping_threshold # early_stopping_patience_counter denotes the number of times validation metrics failed to improve. 
class PrinterCallback(TrainerCallback):
    """
    A bare [`TrainerCallback`] that just prints the logs.
    """

    def on_log(self, args, state, control, logs=None, **kwargs):
        _ = logs.pop("total_flos", None)
        if state.is_local_process_zero:
            print(logs)


class EarlyStoppingCallback(TrainerCallback, ExportableState):
    """
    A [`TrainerCallback`] that handles early stopping.

    Args:
        early_stopping_patience (`int`):
            Use with `metric_for_best_model` to stop training when the specified metric worsens for
            `early_stopping_patience` evaluation calls.
        early_stopping_threshold (`float`, *optional*):
            Use with TrainingArguments `metric_for_best_model` and `early_stopping_patience` to denote how much the
            specified metric must improve to satisfy early stopping conditions.

    This callback depends on [`TrainingArguments`] argument *load_best_model_at_end* functionality to set best_metric
    in [`TrainerState`]. Note that if the [`TrainingArguments`] argument *save_steps* differs from *eval_steps*, the
    early stopping will not occur until the next save step.
    """

    def __init__(self, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0):
        self.early_stopping_patience = early_stopping_patience
        self.early_stopping_threshold = early_stopping_threshold
        # early_stopping_patience_counter denotes the number of times validation metrics failed to improve.
        self.early_stopping_patience_counter = 0

    def check_metric_value(self, args, state, control, metric_value):
        # best_metric is set by code for load_best_model
        operator = np.greater if args.greater_is_better else np.less
        if state.best_metric is None or (
            operator(metric_value, state.best_metric)
            and abs(metric_value - state.best_metric) > self.early_stopping_threshold
        ):
            self.early_stopping_patience_counter = 0
        else:
            self.early_stopping_patience_counter += 1

    def on_train_begin(self, args, state, control, **kwargs):
        assert args.load_best_model_at_end, "EarlyStoppingCallback requires load_best_model_at_end = True"
        assert (
            args.metric_for_best_model is not None
        ), "EarlyStoppingCallback requires metric_for_best_model is defined"
        assert (
            args.eval_strategy != IntervalStrategy.NO
        ), "EarlyStoppingCallback requires IntervalStrategy of steps or epoch"

    def on_evaluate(self, args, state, control, metrics, **kwargs):
        metric_to_check = args.metric_for_best_model
        if not metric_to_check.startswith("eval_"):
            metric_to_check = f"eval_{metric_to_check}"
        metric_value = metrics.get(metric_to_check)

        if metric_value is None:
            logger.warning(
                f"early stopping required metric_for_best_model, but did not find {metric_to_check} so early stopping"
                " is disabled"
            )
            return

        self.check_metric_value(args, state, control, metric_value)
        if self.early_stopping_patience_counter >= self.early_stopping_patience:
            control.should_training_stop = True

    def state(self) -> dict:
        return {
            "args": {
                "early_stopping_patience": self.early_stopping_patience,
                "early_stopping_threshold": self.early_stopping_threshold,
            },
            "attributes": {
                "early_stopping_patience_counter": self.early_stopping_patience_counter,
            },
        }
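# Wiring early stopping into a training run requires the `TrainingArguments` flags that
# `on_train_begin` asserts on. A hedged end-to-end sketch (`model`, `train_ds` and
# `eval_ds` are assumed to exist):

from transformers import EarlyStoppingCallback, Trainer, TrainingArguments

trainer = Trainer(
    model=model,
    args=TrainingArguments(
        output_dir="out",                  # hypothetical
        eval_strategy="epoch",
        save_strategy="epoch",             # keep saves aligned with evals
        load_best_model_at_end=True,       # required by the callback
        metric_for_best_model="eval_loss",
        greater_is_better=False,
    ),
    train_dataset=train_ds,
    eval_dataset=eval_ds,
    callbacks=[EarlyStoppingCallback(early_stopping_patience=3)],
)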
# Source: transformers/src/transformers/trainer_callback.py
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class FlaxForcedBOSTokenLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxForcedEOSTokenLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxForceTokensLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGenerationMixin(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLogitsProcessorList(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLogitsWarper(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMinLengthLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxSuppressTokensAtBeginLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxSuppressTokensLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxTemperatureLogitsWarper(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxTopKLogitsWarper(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxTopPLogitsWarper(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWhisperTimeStampLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, 
["flax"]) FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = None FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = None FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None FLAX_MODEL_FOR_MASKED_LM_MAPPING = None FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None FLAX_MODEL_FOR_PRETRAINING_MAPPING = None FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = None FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = None FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = None FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = None FLAX_MODEL_MAPPING = None class FlaxAutoModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForImageClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForNextSentencePrediction(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForSeq2SeqLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForSpeechSeq2Seq(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForVision2Seq(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartDecoderPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class 
FlaxBartPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBeitForImageClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBeitForMaskedImageModeling(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBeitModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBeitPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForNextSentencePrediction(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class 
FlaxBlenderbotForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotSmallForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotSmallModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotSmallPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBloomForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBloomModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBloomPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPTextModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPTextModelWithProjection(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPTextPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPVisionModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPVisionPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDinov2ForImageClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDinov2Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDinov2PreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["flax"]) class FlaxDistilBertModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxEncoderDecoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGemmaForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGemmaModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGemmaPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPT2LMHeadModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPT2Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPT2PreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTNeoForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTNeoModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTNeoPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTJForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTJModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTJPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class 
FlaxLlamaForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLlamaModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLlamaPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLongT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLongT5Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLongT5PreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMarianModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMarianMTModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMarianPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMistralForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMistralModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMistralPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMT5EncoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMT5Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxOPTForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxOPTModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxOPTPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxPegasusForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxPegasusModel(metaclass=DummyObject): _backends = ["flax"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxPegasusPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRegNetForImageClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRegNetModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRegNetPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxResNetForImageClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxResNetModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxResNetPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, 
**kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxSpeechEncoderDecoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxT5EncoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxT5Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxT5PreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxVisionEncoderDecoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxVisionTextDualEncoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxViTForImageClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxViTModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxViTPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWav2Vec2ForCTC(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWav2Vec2ForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWav2Vec2Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWav2Vec2PreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWhisperForAudioClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWhisperForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWhisperModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, 
**kwargs): requires_backends(self, ["flax"]) class FlaxWhisperPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXGLMForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXGLMModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXGLMPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"])
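# Every class in the autogenerated file above follows the same pattern: a `DummyObject`
# metaclass plus `requires_backends`, so that touching any Flax symbol without `flax`
# installed fails with an actionable error at use time rather than at import time. A
# self-contained, simplified sketch of that mechanism (the real helpers live in
# `transformers.utils.import_utils` and carry more bookkeeping):

import importlib.util


def requires_backends(obj, backends):
    """Raise a helpful ImportError if any required backend is missing (simplified)."""
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the {missing} backend(s), which are not installed.")


class DummyObject(type):
    """Metaclass: any non-private attribute access on the class re-checks the backends."""

    def __getattribute__(cls, key):
        if key.startswith("_") and key != "_from_config":
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)
        return super().__getattribute__(key)


class FlaxDemoModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])


# Without `flax` installed, both `FlaxDemoModel()` and an access like
# `FlaxDemoModel.from_pretrained` raise the ImportError above; when `flax` is available,
# transformers imports the real classes instead and these dummies are never used.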
# Source: transformers/src/transformers/utils/dummy_flax_objects.py
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Hub utilities: utilities related to download and cache models
"""

import json
import os
import re
import shutil
import sys
import tempfile
import traceback
import warnings
from concurrent import futures
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
from urllib.parse import urlparse
from uuid import uuid4

import huggingface_hub
import requests
from huggingface_hub import (
    _CACHED_NO_EXIST,
    CommitOperationAdd,
    ModelCard,
    ModelCardData,
    constants,
    create_branch,
    create_commit,
    create_repo,
    get_hf_file_metadata,
    hf_hub_download,
    hf_hub_url,
    try_to_load_from_cache,
)
from huggingface_hub.file_download import REGEX_COMMIT_HASH, http_get
from huggingface_hub.utils import (
    EntryNotFoundError,
    GatedRepoError,
    HfHubHTTPError,
    HFValidationError,
    LocalEntryNotFoundError,
    OfflineModeIsEnabled,
    RepositoryNotFoundError,
    RevisionNotFoundError,
    build_hf_headers,
    get_session,
    hf_raise_for_status,
    send_telemetry,
)
from huggingface_hub.utils._deprecation import _deprecate_method
from requests.exceptions import HTTPError

from . import __version__, logging
from .generic import working_or_temp_dir
from .import_utils import (
    ENV_VARS_TRUE_VALUES,
    _tf_version,
    _torch_version,
    is_tf_available,
    is_torch_available,
    is_training_run_on_sagemaker,
)
from .logging import tqdm


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

_is_offline_mode = huggingface_hub.constants.HF_HUB_OFFLINE


def is_offline_mode():
    return _is_offline_mode


torch_cache_home = os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
default_cache_path = constants.default_cache_path
old_default_cache_path = os.path.join(torch_cache_home, "transformers")

# Determine default cache directory. Lots of legacy environment variables to ensure backward compatibility.
# The best way to set the cache path is with the environment variable HF_HOME. For more details, check out this
# documentation page: https://huggingface.co/docs/huggingface_hub/package_reference/environment_variables.
#
# In code, use `HF_HUB_CACHE` as the default cache path. This variable is set by the library and is guaranteed
# to be set to the right value.
#
# TODO: clean this for v5?
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", constants.HF_HUB_CACHE)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)

# Onetime move from the old location to the new one if no ENV variable has been set.
if (
    os.path.isdir(old_default_cache_path)
    and not os.path.isdir(constants.HF_HUB_CACHE)
    and "PYTORCH_PRETRAINED_BERT_CACHE" not in os.environ
    and "PYTORCH_TRANSFORMERS_CACHE" not in os.environ
    and "TRANSFORMERS_CACHE" not in os.environ
):
    logger.warning(
        "In Transformers v4.22.0, the default path to cache downloaded models changed from"
        " '~/.cache/torch/transformers' to '~/.cache/huggingface/hub'. Since you don't seem to have"
        " overridden and '~/.cache/torch/transformers' is a directory that exists, we're moving it to"
        " '~/.cache/huggingface/hub' to avoid redownloading models you have already in the cache. You should"
        " only see this message once."
    )
    shutil.move(old_default_cache_path, constants.HF_HUB_CACHE)

HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(constants.HF_HOME, "modules"))
TRANSFORMERS_DYNAMIC_MODULE_NAME = "transformers_modules"
SESSION_ID = uuid4().hex

# Add deprecation warning for old environment variables.
for key in ("PYTORCH_PRETRAINED_BERT_CACHE", "PYTORCH_TRANSFORMERS_CACHE", "TRANSFORMERS_CACHE"):
    if os.getenv(key) is not None:
        warnings.warn(
            f"Using `{key}` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.",
            FutureWarning,
        )

S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"

_staging_mode = os.environ.get("HUGGINGFACE_CO_STAGING", "NO").upper() in ENV_VARS_TRUE_VALUES
_default_endpoint = "https://hub-ci.huggingface.co" if _staging_mode else "https://huggingface.co"

HUGGINGFACE_CO_RESOLVE_ENDPOINT = _default_endpoint
if os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", None) is not None:
    warnings.warn(
        "Using the environment variable `HUGGINGFACE_CO_RESOLVE_ENDPOINT` is deprecated and will be removed in "
        "Transformers v5. Use `HF_ENDPOINT` instead.",
        FutureWarning,
    )
    HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", None)
HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HF_ENDPOINT", HUGGINGFACE_CO_RESOLVE_ENDPOINT)
HUGGINGFACE_CO_PREFIX = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/{model_id}/resolve/{revision}/{filename}"
HUGGINGFACE_CO_EXAMPLES_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/examples"


def _get_cache_file_to_return(
    path_or_repo_id: str, full_filename: str, cache_dir: Union[str, Path, None] = None, revision: Optional[str] = None
):
    # We try to see if we have a cached version (not up to date):
    resolved_file = try_to_load_from_cache(path_or_repo_id, full_filename, cache_dir=cache_dir, revision=revision)
    if resolved_file is not None and resolved_file != _CACHED_NO_EXIST:
        return resolved_file
    return None


def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


# TODO: remove this once fully deprecated
# TODO? remove from './examples/research_projects/lxmert/utils.py' as well
# TODO? remove from './examples/research_projects/visual_bert/utils.py' as well
@_deprecate_method(version="4.39.0", message="This method is outdated and does not support the new cache system.")
def get_cached_models(cache_dir: Union[str, Path] = None) -> List[Tuple]:
    """
    Returns a list of tuples representing model binaries that are cached locally. Each tuple has shape `(model_url,
    etag, size_MB)`. Filenames in `cache_dir` are used to get the metadata for each model, only urls ending with
    *.bin* are added.

    Args:
        cache_dir (`Union[str, Path]`, *optional*):
            The cache directory to search for models within. Will default to the transformers cache if unset.
    Returns:
        List[Tuple]: List of tuples each with shape `(model_url, etag, size_MB)`
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    elif isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if not os.path.isdir(cache_dir):
        return []

    cached_models = []
    for file in os.listdir(cache_dir):
        if file.endswith(".json"):
            meta_path = os.path.join(cache_dir, file)
            with open(meta_path, encoding="utf-8") as meta_file:
                metadata = json.load(meta_file)
                url = metadata["url"]
                etag = metadata["etag"]
                if url.endswith(".bin"):
                    size_MB = os.path.getsize(meta_path.strip(".json")) / 1e6
                    cached_models.append((url, etag, size_MB))

    return cached_models


def define_sagemaker_information():
    try:
        instance_data = requests.get(os.environ["ECS_CONTAINER_METADATA_URI"]).json()
        dlc_container_used = instance_data["Image"]
        dlc_tag = instance_data["Image"].split(":")[1]
    except Exception:
        dlc_container_used = None
        dlc_tag = None

    sagemaker_params = json.loads(os.getenv("SM_FRAMEWORK_PARAMS", "{}"))
    runs_distributed_training = True if "sagemaker_distributed_dataparallel_enabled" in sagemaker_params else False
    account_id = os.getenv("TRAINING_JOB_ARN").split(":")[4] if "TRAINING_JOB_ARN" in os.environ else None

    sagemaker_object = {
        "sm_framework": os.getenv("SM_FRAMEWORK_MODULE", None),
        "sm_region": os.getenv("AWS_REGION", None),
        "sm_number_gpu": os.getenv("SM_NUM_GPUS", 0),
        "sm_number_cpu": os.getenv("SM_NUM_CPUS", 0),
        "sm_distributed_training": runs_distributed_training,
        "sm_deep_learning_container": dlc_container_used,
        "sm_deep_learning_container_tag": dlc_tag,
        "sm_account_id": account_id,
    }
    return sagemaker_object


def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """
    Formats a user-agent string with basic info about a request.
    """
    ua = f"transformers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_tf_available():
        ua += f"; tensorflow/{_tf_version}"
    if constants.HF_HUB_DISABLE_TELEMETRY:
        return ua + "; telemetry/off"
    if is_training_run_on_sagemaker():
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in define_sagemaker_information().items())
    # CI will set this value to True
    if os.environ.get("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
""" if resolved_file is None or commit_hash is not None: return commit_hash resolved_file = str(Path(resolved_file).as_posix()) search = re.search(r"snapshots/([^/]+)/", resolved_file) if search is None: return None commit_hash = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None def cached_file( path_or_repo_id: Union[str, os.PathLike], filename: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: Optional[bool] = None, proxies: Optional[Dict[str, str]] = None, token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, subfolder: str = "", repo_type: Optional[str] = None, user_agent: Optional[Union[str, Dict[str, str]]] = None, _raise_exceptions_for_gated_repo: bool = True, _raise_exceptions_for_missing_entries: bool = True, _raise_exceptions_for_connection_errors: bool = True, _commit_hash: Optional[str] = None, **deprecated_kwargs, ) -> Optional[str]: """ Tries to locate a file in a local folder and repo, downloads and cache it if necessary. Args: path_or_repo_id (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a model repo on huggingface.co. - a path to a *directory* potentially containing the file. filename (`str`): The name of the file to locate in `path_or_repo`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download: Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v5 of Transformers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. local_files_only (`bool`, *optional*, defaults to `False`): If `True`, will only try to load the tokenizer configuration from local files. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. repo_type (`str`, *optional*): Specify the repo type (useful when downloading from a space for instance). <Tip> Passing `token=True` is required when you want to use a private model. </Tip> Returns: `Optional[str]`: Returns the resolved file (to the cache folder if downloaded from a repo). Examples: ```python # Download a model weight from the Hub and cache it. model_weights_file = cached_file("google-bert/bert-base-uncased", "pytorch_model.bin") ``` """ use_auth_token = deprecated_kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. 
Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") token = use_auth_token # Private arguments # _raise_exceptions_for_gated_repo: if False, do not raise an exception for gated repo error but return # None. # _raise_exceptions_for_missing_entries: if False, do not raise an exception for missing entries but return # None. # _raise_exceptions_for_connection_errors: if False, do not raise an exception for connection errors but return # None. # _commit_hash: passed when we are chaining several calls to various files (e.g. when loading a tokenizer or # a pipeline). If files are cached for this commit hash, avoid calls to head and get from the cache. if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True if subfolder is None: subfolder = "" path_or_repo_id = str(path_or_repo_id) full_filename = os.path.join(subfolder, filename) if os.path.isdir(path_or_repo_id): resolved_file = os.path.join(os.path.join(path_or_repo_id, subfolder), filename) if not os.path.isfile(resolved_file): if _raise_exceptions_for_missing_entries: raise EnvironmentError( f"{path_or_repo_id} does not appear to have a file named {full_filename}. Checkout " f"'https://huggingface.co/{path_or_repo_id}/tree/{revision}' for available files." ) else: return None return resolved_file if cache_dir is None: cache_dir = TRANSFORMERS_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) if _commit_hash is not None and not force_download: # If the file is cached under that commit hash, we return it directly. resolved_file = try_to_load_from_cache( path_or_repo_id, full_filename, cache_dir=cache_dir, revision=_commit_hash, repo_type=repo_type ) if resolved_file is not None: if resolved_file is not _CACHED_NO_EXIST: return resolved_file elif not _raise_exceptions_for_missing_entries: return None else: raise EnvironmentError(f"Could not locate {full_filename} inside {path_or_repo_id}.") user_agent = http_user_agent(user_agent) try: # Load from URL or cache if already cached resolved_file = hf_hub_download( path_or_repo_id, filename, subfolder=None if len(subfolder) == 0 else subfolder, repo_type=repo_type, revision=revision, cache_dir=cache_dir, user_agent=user_agent, force_download=force_download, proxies=proxies, resume_download=resume_download, token=token, local_files_only=local_files_only, ) except GatedRepoError as e: resolved_file = _get_cache_file_to_return(path_or_repo_id, full_filename, cache_dir, revision) if resolved_file is not None or not _raise_exceptions_for_gated_repo: return resolved_file raise EnvironmentError( "You are trying to access a gated repo.\nMake sure to have access to it at " f"https://huggingface.co/{path_or_repo_id}.\n{str(e)}" ) from e except RepositoryNotFoundError as e: raise EnvironmentError( f"{path_or_repo_id} is not a local folder and is not a valid model identifier " "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token " "having permission to this repo either by logging in with `huggingface-cli login` or by passing " "`token=<your_token>`" ) from e except RevisionNotFoundError as e: raise EnvironmentError( f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists " "for this model name. Check the model page at " f"'https://huggingface.co/{path_or_repo_id}' for available revisions." 
) from e except LocalEntryNotFoundError as e: resolved_file = _get_cache_file_to_return(path_or_repo_id, full_filename, cache_dir, revision) if ( resolved_file is not None or not _raise_exceptions_for_missing_entries or not _raise_exceptions_for_connection_errors ): return resolved_file raise EnvironmentError( f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this file, couldn't find it in the" f" cached files and it looks like {path_or_repo_id} is not the path to a directory containing a file named" f" {full_filename}.\nCheckout your internet connection or see how to run the library in offline mode at" " 'https://huggingface.co/docs/transformers/installation#offline-mode'." ) from e except EntryNotFoundError as e: if not _raise_exceptions_for_missing_entries: return None if revision is None: revision = "main" raise EnvironmentError( f"{path_or_repo_id} does not appear to have a file named {full_filename}. Checkout " f"'https://huggingface.co/{path_or_repo_id}/tree/{revision}' for available files." ) from e except HTTPError as err: resolved_file = _get_cache_file_to_return(path_or_repo_id, full_filename, cache_dir, revision) if resolved_file is not None or not _raise_exceptions_for_connection_errors: return resolved_file raise EnvironmentError(f"There was a specific connection error when trying to load {path_or_repo_id}:\n{err}") except HFValidationError as e: raise EnvironmentError( f"Incorrect path_or_model_id: '{path_or_repo_id}'. Please provide either the path to a local folder or the repo_id of a model on the Hub." ) from e return resolved_file # TODO: deprecate `get_file_from_repo` or document it differently? # Docstring is exactly the same as `cached_repo` but behavior is slightly different. If file is missing or if # there is a connection error, `cached_repo` will return None while `get_file_from_repo` will raise an error. # IMO we should keep only 1 method and have a single `raise_error` argument (to be discussed). def get_file_from_repo( path_or_repo: Union[str, os.PathLike], filename: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: Optional[bool] = None, proxies: Optional[Dict[str, str]] = None, token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, subfolder: str = "", **deprecated_kwargs, ): """ Tries to locate a file in a local folder and repo, downloads and cache it if necessary. Args: path_or_repo (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a model repo on huggingface.co. - a path to a *directory* potentially containing the file. filename (`str`): The name of the file to locate in `path_or_repo`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download: Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v5 of Transformers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. 
If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. local_files_only (`bool`, *optional*, defaults to `False`): If `True`, will only try to load the tokenizer configuration from local files. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. <Tip> Passing `token=True` is required when you want to use a private model. </Tip> Returns: `Optional[str]`: Returns the resolved file (to the cache folder if downloaded from a repo) or `None` if the file does not exist. Examples: ```python # Download a tokenizer configuration from huggingface.co and cache. tokenizer_config = get_file_from_repo("google-bert/bert-base-uncased", "tokenizer_config.json") # This model does not have a tokenizer config so the result will be None. tokenizer_config = get_file_from_repo("FacebookAI/xlm-roberta-base", "tokenizer_config.json") ``` """ use_auth_token = deprecated_kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") token = use_auth_token return cached_file( path_or_repo_id=path_or_repo, filename=filename, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, token=token, revision=revision, local_files_only=local_files_only, subfolder=subfolder, _raise_exceptions_for_gated_repo=False, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False, ) def download_url(url, proxies=None): """ Downloads a given url in a temporary file. This function is not safe to use in multiple processes. Its only use is for deprecated behavior allowing to download config/models with a single url instead of using the Hub. Args: url (`str`): The url of the file to download. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. Returns: `str`: The location of the temporary file where the url was downloaded. """ warnings.warn( f"Using `from_pretrained` with the url of a file (here {url}) is deprecated and won't be possible anymore in" " v5 of Transformers. You should host your file on the Hub (hf.co) instead and use the repository ID. 
Note" " that this is not compatible with the caching system (your file will be downloaded at each execution) or" " multiple processes (each process will download the file in a different temporary file).", FutureWarning, ) tmp_fd, tmp_file = tempfile.mkstemp() with os.fdopen(tmp_fd, "wb") as f: http_get(url, f, proxies=proxies) return tmp_file def has_file( path_or_repo: Union[str, os.PathLike], filename: str, revision: Optional[str] = None, proxies: Optional[Dict[str, str]] = None, token: Optional[Union[bool, str]] = None, *, local_files_only: bool = False, cache_dir: Union[str, Path, None] = None, repo_type: Optional[str] = None, **deprecated_kwargs, ): """ Checks if a repo contains a given file without downloading it. Works for remote repos and local folders. If offline mode is enabled, checks if the file exists in the cache. <Tip warning={false}> This function will raise an error if the repository `path_or_repo` is not valid or if `revision` does not exist for this repo, but will return False for regular connection errors. </Tip> """ use_auth_token = deprecated_kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") token = use_auth_token # If path to local directory, check if the file exists if os.path.isdir(path_or_repo): return os.path.isfile(os.path.join(path_or_repo, filename)) # Else it's a repo => let's check if the file exists in local cache or on the Hub # Check if file exists in cache # This information might be outdated so it's best to also make a HEAD call (if allowed). cached_path = try_to_load_from_cache( repo_id=path_or_repo, filename=filename, revision=revision, repo_type=repo_type, cache_dir=cache_dir, ) has_file_in_cache = isinstance(cached_path, str) # If local_files_only, don't try the HEAD call if local_files_only: return has_file_in_cache # Check if the file exists try: response = get_session().head( hf_hub_url(path_or_repo, filename=filename, revision=revision, repo_type=repo_type), headers=build_hf_headers(token=token, user_agent=http_user_agent()), allow_redirects=False, proxies=proxies, timeout=10, ) except (requests.exceptions.SSLError, requests.exceptions.ProxyError): # Actually raise for those subclasses of ConnectionError raise except ( requests.exceptions.ConnectionError, requests.exceptions.Timeout, OfflineModeIsEnabled, ): return has_file_in_cache try: hf_raise_for_status(response) return True except GatedRepoError as e: logger.error(e) raise EnvironmentError( f"{path_or_repo} is a gated repository. Make sure to request access at " f"https://huggingface.co/{path_or_repo} and pass a token having permission to this repo either by " "logging in with `huggingface-cli login` or by passing `token=<your_token>`." ) from e except RepositoryNotFoundError as e: logger.error(e) raise EnvironmentError( f"{path_or_repo} is not a local folder or a valid repository name on 'https://hf.co'." ) from e except RevisionNotFoundError as e: logger.error(e) raise EnvironmentError( f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this " f"model name. Check the model page at 'https://huggingface.co/{path_or_repo}' for available revisions." 
) from e except EntryNotFoundError: return False # File does not exist except requests.HTTPError: # Any authentication/authorization error will be caught here => default to cache return has_file_in_cache class PushToHubMixin: """ A Mixin containing the functionality to push a model or tokenizer to the hub. """ def _create_repo( self, repo_id: str, private: Optional[bool] = None, token: Optional[Union[bool, str]] = None, repo_url: Optional[str] = None, organization: Optional[str] = None, ) -> str: """ Create the repo if needed, cleans up repo_id with deprecated kwargs `repo_url` and `organization`, retrieves the token. """ if repo_url is not None: warnings.warn( "The `repo_url` argument is deprecated and will be removed in v5 of Transformers. Use `repo_id` " "instead." ) if repo_id is not None: raise ValueError( "`repo_id` and `repo_url` are both specified. Please set only the argument `repo_id`." ) repo_id = repo_url.replace(f"{HUGGINGFACE_CO_RESOLVE_ENDPOINT}/", "") if organization is not None: warnings.warn( "The `organization` argument is deprecated and will be removed in v5 of Transformers. Set your " "organization directly in the `repo_id` passed instead (`repo_id={organization}/{model_id}`)." ) if not repo_id.startswith(organization): if "/" in repo_id: repo_id = repo_id.split("/")[-1] repo_id = f"{organization}/{repo_id}" url = create_repo(repo_id=repo_id, token=token, private=private, exist_ok=True) return url.repo_id def _get_files_timestamps(self, working_dir: Union[str, os.PathLike]): """ Returns the list of files with their last modification timestamp. """ return {f: os.path.getmtime(os.path.join(working_dir, f)) for f in os.listdir(working_dir)} def _upload_modified_files( self, working_dir: Union[str, os.PathLike], repo_id: str, files_timestamps: Dict[str, float], commit_message: Optional[str] = None, token: Optional[Union[bool, str]] = None, create_pr: bool = False, revision: str = None, commit_description: str = None, ): """ Uploads all modified files in `working_dir` to `repo_id`, based on `files_timestamps`. 
""" if commit_message is None: if "Model" in self.__class__.__name__: commit_message = "Upload model" elif "Config" in self.__class__.__name__: commit_message = "Upload config" elif "Tokenizer" in self.__class__.__name__: commit_message = "Upload tokenizer" elif "FeatureExtractor" in self.__class__.__name__: commit_message = "Upload feature extractor" elif "Processor" in self.__class__.__name__: commit_message = "Upload processor" else: commit_message = f"Upload {self.__class__.__name__}" modified_files = [ f for f in os.listdir(working_dir) if f not in files_timestamps or os.path.getmtime(os.path.join(working_dir, f)) > files_timestamps[f] ] # filter for actual files + folders at the root level modified_files = [ f for f in modified_files if os.path.isfile(os.path.join(working_dir, f)) or os.path.isdir(os.path.join(working_dir, f)) ] operations = [] # upload standalone files for file in modified_files: if os.path.isdir(os.path.join(working_dir, file)): # go over individual files of folder for f in os.listdir(os.path.join(working_dir, file)): operations.append( CommitOperationAdd( path_or_fileobj=os.path.join(working_dir, file, f), path_in_repo=os.path.join(file, f) ) ) else: operations.append( CommitOperationAdd(path_or_fileobj=os.path.join(working_dir, file), path_in_repo=file) ) if revision is not None: try: create_branch(repo_id=repo_id, branch=revision, token=token, exist_ok=True) except HfHubHTTPError as e: if e.response.status_code == 403 and create_pr: # If we are creating a PR on a repo we don't have access to, we can't create the branch. # so let's assume the branch already exists. If it's not the case, an error will be raised when # calling `create_commit` below. pass else: raise logger.info(f"Uploading the following files to {repo_id}: {','.join(modified_files)}") return create_commit( repo_id=repo_id, operations=operations, commit_message=commit_message, commit_description=commit_description, token=token, create_pr=create_pr, revision=revision, ) def push_to_hub( self, repo_id: str, use_temp_dir: Optional[bool] = None, commit_message: Optional[str] = None, private: Optional[bool] = None, token: Optional[Union[bool, str]] = None, max_shard_size: Optional[Union[int, str]] = "5GB", create_pr: bool = False, safe_serialization: bool = True, revision: str = None, commit_description: str = None, tags: Optional[List[str]] = None, **deprecated_kwargs, ) -> str: """ Upload the {object_files} to the 🤗 Model Hub. Parameters: repo_id (`str`): The name of the repository you want to push your {object} to. It should contain your organization name when pushing to a given organization. use_temp_dir (`bool`, *optional*): Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. Will default to `True` if there is no directory named like `repo_id`, `False` otherwise. commit_message (`str`, *optional*): Message to commit while pushing. Will default to `"Upload {object}"`. private (`bool`, *optional*): Whether or not the repository created should be private. token (`bool` or `str`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url` is not specified. max_shard_size (`int` or `str`, *optional*, defaults to `"5GB"`): Only applicable for models. The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. 
If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). We default it to `"5GB"` so that users can easily load models on free-tier Google Colab instances without any CPU OOM issues. create_pr (`bool`, *optional*, defaults to `False`): Whether or not to create a PR with the uploaded files or directly commit. safe_serialization (`bool`, *optional*, defaults to `True`): Whether or not to convert the model weights in safetensors format for safer serialization. revision (`str`, *optional*): Branch to push the uploaded files to. commit_description (`str`, *optional*): The description of the commit that will be created tags (`List[str]`, *optional*): List of tags to push on the Hub. Examples: ```python from transformers import {object_class} {object} = {object_class}.from_pretrained("google-bert/bert-base-cased") # Push the {object} to your namespace with the name "my-finetuned-bert". {object}.push_to_hub("my-finetuned-bert") # Push the {object} to an organization with the name "my-finetuned-bert". {object}.push_to_hub("huggingface/my-finetuned-bert") ``` """ use_auth_token = deprecated_kwargs.pop("use_auth_token", None) ignore_metadata_errors = deprecated_kwargs.pop("ignore_metadata_errors", False) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) token = use_auth_token repo_path_or_name = deprecated_kwargs.pop("repo_path_or_name", None) if repo_path_or_name is not None: # Should use `repo_id` instead of `repo_path_or_name`. When using `repo_path_or_name`, we try to infer # repo_id from the folder path, if it exists. warnings.warn( "The `repo_path_or_name` argument is deprecated and will be removed in v5 of Transformers. Use " "`repo_id` instead.", FutureWarning, ) if repo_id is not None: raise ValueError( "`repo_id` and `repo_path_or_name` are both specified. Please set only the argument `repo_id`." ) if os.path.isdir(repo_path_or_name): # repo_path: infer repo_id from the path repo_id = repo_id.split(os.path.sep)[-1] working_dir = repo_id else: # repo_name: use it as repo_id repo_id = repo_path_or_name working_dir = repo_id.split("/")[-1] else: # Repo_id is passed correctly: infer working_dir from it working_dir = repo_id.split("/")[-1] # Deprecation warning will be sent after for repo_url and organization repo_url = deprecated_kwargs.pop("repo_url", None) organization = deprecated_kwargs.pop("organization", None) repo_id = self._create_repo( repo_id, private=private, token=token, repo_url=repo_url, organization=organization ) # Create a new empty model card and eventually tag it model_card = create_and_tag_model_card( repo_id, tags, token=token, ignore_metadata_errors=ignore_metadata_errors ) if use_temp_dir is None: use_temp_dir = not os.path.isdir(working_dir) with working_or_temp_dir(working_dir=working_dir, use_temp_dir=use_temp_dir) as work_dir: files_timestamps = self._get_files_timestamps(work_dir) # Save all files. 
self.save_pretrained(work_dir, max_shard_size=max_shard_size, safe_serialization=safe_serialization) # Update model card if needed: model_card.save(os.path.join(work_dir, "README.md")) return self._upload_modified_files( work_dir, repo_id, files_timestamps, commit_message=commit_message, token=token, create_pr=create_pr, revision=revision, commit_description=commit_description, ) def send_example_telemetry(example_name, *example_args, framework="pytorch"): """ Sends telemetry that helps tracking the examples use. Args: example_name (`str`): The name of the example. *example_args (dataclasses or `argparse.ArgumentParser`): The arguments to the script. This function will only try to extract the model and dataset name from those. Nothing else is tracked. framework (`str`, *optional*, defaults to `"pytorch"`): The framework for the example. """ if is_offline_mode(): return data = {"example": example_name, "framework": framework} for args in example_args: args_as_dict = {k: v for k, v in args.__dict__.items() if not k.startswith("_") and v is not None} if "model_name_or_path" in args_as_dict: model_name = args_as_dict["model_name_or_path"] # Filter out local paths if not os.path.isdir(model_name): data["model_name"] = args_as_dict["model_name_or_path"] if "dataset_name" in args_as_dict: data["dataset_name"] = args_as_dict["dataset_name"] elif "task_name" in args_as_dict: # Extract script name from the example_name script_name = example_name.replace("tf_", "").replace("flax_", "").replace("run_", "") script_name = script_name.replace("_no_trainer", "") data["dataset_name"] = f"{script_name}-{args_as_dict['task_name']}" # Send telemetry in the background send_telemetry( topic="examples", library_name="transformers", library_version=__version__, user_agent=http_user_agent(data) ) def convert_file_size_to_int(size: Union[int, str]): """ Converts a size expressed as a string with digits an unit (like `"5MB"`) to an integer (in bytes). Args: size (`int` or `str`): The size to convert. Will be directly returned if an `int`. Example: ```py >>> convert_file_size_to_int("1MiB") 1048576 ``` """ if isinstance(size, int): return size if size.upper().endswith("GIB"): return int(size[:-3]) * (2**30) if size.upper().endswith("MIB"): return int(size[:-3]) * (2**20) if size.upper().endswith("KIB"): return int(size[:-3]) * (2**10) if size.upper().endswith("GB"): int_size = int(size[:-2]) * (10**9) return int_size // 8 if size.endswith("b") else int_size if size.upper().endswith("MB"): int_size = int(size[:-2]) * (10**6) return int_size // 8 if size.endswith("b") else int_size if size.upper().endswith("KB"): int_size = int(size[:-2]) * (10**3) return int_size // 8 if size.endswith("b") else int_size raise ValueError("`size` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.") def get_checkpoint_shard_files( pretrained_model_name_or_path, index_filename, cache_dir=None, force_download=False, proxies=None, resume_download=None, local_files_only=False, token=None, user_agent=None, revision=None, subfolder="", _commit_hash=None, **deprecated_kwargs, ): """ For a given model: - download and cache all the shards of a sharded checkpoint if `pretrained_model_name_or_path` is a model ID on the Hub - returns the list of paths to all the shards, as well as some metadata. For the description of each arg, see [`PreTrainedModel.from_pretrained`]. `index_filename` is the full path to the index (downloaded and cached if `pretrained_model_name_or_path` is a model ID on the Hub). 
""" import json use_auth_token = deprecated_kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") token = use_auth_token if not os.path.isfile(index_filename): raise ValueError(f"Can't find a checkpoint index ({index_filename}) in {pretrained_model_name_or_path}.") with open(index_filename, "r") as f: index = json.loads(f.read()) shard_filenames = sorted(set(index["weight_map"].values())) sharded_metadata = index["metadata"] sharded_metadata["all_checkpoint_keys"] = list(index["weight_map"].keys()) sharded_metadata["weight_map"] = index["weight_map"].copy() # First, let's deal with local folder. if os.path.isdir(pretrained_model_name_or_path): shard_filenames = [os.path.join(pretrained_model_name_or_path, subfolder, f) for f in shard_filenames] return shard_filenames, sharded_metadata # At this stage pretrained_model_name_or_path is a model identifier on the Hub cached_filenames = [] # Check if the model is already cached or not. We only try the last checkpoint, this should cover most cases of # downloaded (if interrupted). last_shard = try_to_load_from_cache( pretrained_model_name_or_path, shard_filenames[-1], cache_dir=cache_dir, revision=_commit_hash ) show_progress_bar = last_shard is None or force_download for shard_filename in tqdm(shard_filenames, desc="Downloading shards", disable=not show_progress_bar): try: # Load from URL cached_filename = cached_file( pretrained_model_name_or_path, shard_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, revision=revision, subfolder=subfolder, _commit_hash=_commit_hash, ) # We have already dealt with RepositoryNotFoundError and RevisionNotFoundError when getting the index, so # we don't have to catch them here. except EntryNotFoundError: raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named {shard_filename} which is " "required according to the checkpoint index." ) except HTTPError: raise EnvironmentError( f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load {shard_filename}. You should try" " again after checking your internet connection." ) cached_filenames.append(cached_filename) return cached_filenames, sharded_metadata # All what is below is for conversion between old cache format and new cache format. def get_all_cached_files(cache_dir=None): """ Returns a list for all files cached with appropriate metadata. """ if cache_dir is None: cache_dir = TRANSFORMERS_CACHE else: cache_dir = str(cache_dir) if not os.path.isdir(cache_dir): return [] cached_files = [] for file in os.listdir(cache_dir): meta_path = os.path.join(cache_dir, f"{file}.json") if not os.path.isfile(meta_path): continue with open(meta_path, encoding="utf-8") as meta_file: metadata = json.load(meta_file) url = metadata["url"] etag = metadata["etag"].replace('"', "") cached_files.append({"file": file, "url": url, "etag": etag}) return cached_files def extract_info_from_url(url): """ Extract repo_name, revision and filename from an url. 
""" search = re.search(r"^https://huggingface\.co/(.*)/resolve/([^/]*)/(.*)$", url) if search is None: return None repo, revision, filename = search.groups() cache_repo = "--".join(["models"] + repo.split("/")) return {"repo": cache_repo, "revision": revision, "filename": filename} def create_and_tag_model_card( repo_id: str, tags: Optional[List[str]] = None, token: Optional[str] = None, ignore_metadata_errors: bool = False, ): """ Creates or loads an existing model card and tags it. Args: repo_id (`str`): The repo_id where to look for the model card. tags (`List[str]`, *optional*): The list of tags to add in the model card token (`str`, *optional*): Authentication token, obtained with `huggingface_hub.HfApi.login` method. Will default to the stored token. ignore_metadata_errors (`str`): If True, errors while parsing the metadata section will be ignored. Some information might be lost during the process. Use it at your own risk. """ try: # Check if the model card is present on the remote repo model_card = ModelCard.load(repo_id, token=token, ignore_metadata_errors=ignore_metadata_errors) except EntryNotFoundError: # Otherwise create a simple model card from template model_description = "This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated." card_data = ModelCardData(tags=[] if tags is None else tags, library_name="transformers") model_card = ModelCard.from_template(card_data, model_description=model_description) if tags is not None: for model_tag in tags: if model_tag not in model_card.data.tags: model_card.data.tags.append(model_tag) return model_card def clean_files_for(file): """ Remove, if they exist, file, file.json and file.lock """ for f in [file, f"{file}.json", f"{file}.lock"]: if os.path.isfile(f): os.remove(f) def move_to_new_cache(file, repo, filename, revision, etag, commit_hash): """ Move file to repo following the new huggingface hub cache organization. 
""" os.makedirs(repo, exist_ok=True) # refs os.makedirs(os.path.join(repo, "refs"), exist_ok=True) if revision != commit_hash: ref_path = os.path.join(repo, "refs", revision) with open(ref_path, "w") as f: f.write(commit_hash) # blobs os.makedirs(os.path.join(repo, "blobs"), exist_ok=True) blob_path = os.path.join(repo, "blobs", etag) shutil.move(file, blob_path) # snapshots os.makedirs(os.path.join(repo, "snapshots"), exist_ok=True) os.makedirs(os.path.join(repo, "snapshots", commit_hash), exist_ok=True) pointer_path = os.path.join(repo, "snapshots", commit_hash, filename) huggingface_hub.file_download._create_relative_symlink(blob_path, pointer_path) clean_files_for(file) def move_cache(cache_dir=None, new_cache_dir=None, token=None): if new_cache_dir is None: new_cache_dir = TRANSFORMERS_CACHE if cache_dir is None: # Migrate from old cache in .cache/huggingface/transformers old_cache = Path(TRANSFORMERS_CACHE).parent / "transformers" if os.path.isdir(str(old_cache)): cache_dir = str(old_cache) else: cache_dir = new_cache_dir cached_files = get_all_cached_files(cache_dir=cache_dir) logger.info(f"Moving {len(cached_files)} files to the new cache system") hub_metadata = {} for file_info in tqdm(cached_files): url = file_info.pop("url") if url not in hub_metadata: try: hub_metadata[url] = get_hf_file_metadata(url, token=token) except requests.HTTPError: continue etag, commit_hash = hub_metadata[url].etag, hub_metadata[url].commit_hash if etag is None or commit_hash is None: continue if file_info["etag"] != etag: # Cached file is not up to date, we just throw it as a new version will be downloaded anyway. clean_files_for(os.path.join(cache_dir, file_info["file"])) continue url_info = extract_info_from_url(url) if url_info is None: # Not a file from huggingface.co continue repo = os.path.join(new_cache_dir, url_info["repo"]) move_to_new_cache( file=os.path.join(cache_dir, file_info["file"]), repo=repo, filename=url_info["filename"], revision=url_info["revision"], etag=etag, commit_hash=commit_hash, ) class PushInProgress: """ Internal class to keep track of a push in progress (which might contain multiple `Future` jobs). """ def __init__(self, jobs: Optional[futures.Future] = None) -> None: self.jobs = [] if jobs is None else jobs def is_done(self): return all(job.done() for job in self.jobs) def wait_until_done(self): futures.wait(self.jobs) def cancel(self) -> None: self.jobs = [ job for job in self.jobs # Cancel the job if it wasn't started yet and remove cancelled/done jobs from the list if not (job.cancel() or job.done()) ] cache_version_file = os.path.join(TRANSFORMERS_CACHE, "version.txt") if not os.path.isfile(cache_version_file): cache_version = 0 else: with open(cache_version_file) as f: try: cache_version = int(f.read()) except ValueError: cache_version = 0 cache_is_not_empty = os.path.isdir(TRANSFORMERS_CACHE) and len(os.listdir(TRANSFORMERS_CACHE)) > 0 if cache_version < 1 and cache_is_not_empty: if is_offline_mode(): logger.warning( "You are offline and the cache for model files in Transformers v4.22.0 has been updated while your local " "cache seems to be the one of a previous version. It is very likely that all your calls to any " "`from_pretrained()` method will fail. Remove the offline mode and enable internet connection to have " "your cache be updated automatically, then you can go back to offline mode." ) else: logger.warning( "The cache for model files in Transformers v4.22.0 has been updated. Migrating your old cache. This is a " "one-time only operation. 
You can interrupt this and resume the migration later on by calling "
            "`transformers.utils.move_cache()`."
        )

    try:
        if TRANSFORMERS_CACHE != constants.HF_HUB_CACHE:
            # Users set some env variable to customize cache storage
            move_cache(TRANSFORMERS_CACHE, TRANSFORMERS_CACHE)
        else:
            move_cache()
    except Exception as e:
        trace = "\n".join(traceback.format_tb(e.__traceback__))
        logger.error(
            f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
            "file an issue at https://github.com/huggingface/transformers/issues/new/choose and copy paste this whole "
            "message and we will do our best to help."
        )

if cache_version < 1:
    try:
        os.makedirs(TRANSFORMERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({TRANSFORMERS_CACHE}). You should set "
            "the environment variable TRANSFORMERS_CACHE to a writable directory."
        )
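For reference, here is a minimal, hypothetical usage sketch for the public helpers defined above (`cached_file` and `has_file`); the repo and file names are placeholders chosen for illustration and are assumed to exist on the Hub:

```python
from transformers.utils import cached_file, has_file

# Resolve a file from a Hub repo: it is downloaded and cached on the first
# call, then served from the local cache afterwards.
config_path = cached_file("google-bert/bert-base-uncased", "config.json")

# Probe for a file without downloading it (HEAD call, falls back to the cache offline).
if has_file("google-bert/bert-base-uncased", "tokenizer_config.json"):
    tokenizer_config_path = cached_file("google-bert/bert-base-uncased", "tokenizer_config.json")
```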
**TEMPLATE**
=====================================

*search & replace the following keywords, e.g.:*

`:%s/\[name of model\]/brand_new_bert/g`

-[lowercase name of model] # e.g. brand_new_bert

-[camelcase name of model] # e.g. BrandNewBert

-[name of mentor] # e.g. [Peter](https://github.com/peter)

-[link to original repo]

-[start date]

-[end date]

How to add [camelcase name of model] to 🤗 Transformers?
=====================================

Mentor: [name of mentor]

Begin: [start date]

Estimated End: [end date]

Adding a new model is often difficult and requires an in-depth knowledge of the 🤗 Transformers library and ideally also of the model's original repository. At Hugging Face, we are trying to empower the community more and more to add models independently.

The following sections explain in detail how to add [camelcase name of model] to Transformers. You will work closely with [name of mentor] to integrate [camelcase name of model] into Transformers. By doing so, you will gain both a theoretical and a deep practical understanding of [camelcase name of model]. But more importantly, you will have made a major open-source contribution to Transformers. Along the way, you will:

- get insights into open-source best practices
- understand the design principles of one of the most popular NLP libraries
- learn how to efficiently test large NLP models
- learn how to integrate utilities like `black` and `ruff`, and commands like `make fix-copies`, into a library to always ensure clean and readable code

To start, let's try to get a general overview of the Transformers library.

General overview of 🤗 Transformers
----------------------------------

First, you should get a general overview of 🤗 Transformers. Transformers is a very opinionated library, so there is a chance that you don't agree with some of the library's philosophies or design choices. From our experience, however, we found that the fundamental design choices and philosophies of the library are crucial to efficiently scale Transformers while keeping maintenance costs at a reasonable level.

A good first starting point to better understand the library is to read the [documentation of our philosophy](https://huggingface.co/transformers/philosophy.html). As a result of our way of working, there are some choices that we try to apply to all models:

- Composition is generally favored over abstraction
- Duplicating code is not always bad if it strongly improves the readability or accessibility of a model
- Model files are as self-contained as possible so that when you read the code of a specific model, you ideally only have to look into the respective `modeling_....py` file.

In our opinion, the library's code is not just a means to provide a product, *e.g.*, the ability to use BERT for inference, but also the very product that we want to improve. Hence, when adding a model, the user is not only the person that will use your model, but also everybody that will read, try to understand, and possibly tweak your code.

With this in mind, let's go a bit deeper into the general library design.

### Overview of models

To successfully add a model, it is important to understand the interaction between your model and its config, `PreTrainedModel`, and `PretrainedConfig`. For exemplary purposes, we will call the PyTorch model to be added to 🤗 Transformers `BrandNewBert`.
Let's take a look:

![image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers_overview.png)

As you can see, we do make use of inheritance in 🤗 Transformers, but we keep the level of abstraction to an absolute minimum. There are never more than two levels of abstraction for any model in the library. `BrandNewBertModel` inherits from `BrandNewBertPreTrainedModel`, which in turn inherits from `PreTrainedModel`, and that's it. As a general rule, we want to make sure that a new model only depends on `PreTrainedModel`. The important functionalities that are automatically provided to every new model are `PreTrainedModel.from_pretrained` and `PreTrainedModel.save_pretrained`, which are used for serialization and deserialization. All of the other important functionalities, such as `BrandNewBertModel.forward`, should be completely defined in the new `modeling_brand_new_bert.py` module.

Next, we want to make sure that a model with a specific head layer, such as `BrandNewBertForMaskedLM`, does not inherit from `BrandNewBertModel`, but rather uses `BrandNewBertModel` as a component that can be called in its forward pass to keep the level of abstraction low. Every new model requires a configuration class, called `BrandNewBertConfig`. This configuration is always stored as an attribute in `PreTrainedModel`, and thus can be accessed via the `config` attribute for all classes inheriting from `BrandNewBertPreTrainedModel`:

```python
# assuming that `brand_new_bert` belongs to the organization `brandy`
model = BrandNewBertModel.from_pretrained("brandy/brand_new_bert")
model.config  # model has access to its config
```

Similar to the model, the configuration inherits basic serialization and deserialization functionalities from `PretrainedConfig`. Note that the configuration and the model are always serialized into two different formats - the model to a `pytorch_model.bin` file and the configuration to a `config.json` file. Calling `PreTrainedModel.save_pretrained` will automatically call `PretrainedConfig.save_pretrained`, so that both model and configuration are saved.

### Overview of tokenizers

Not quite ready yet :-( This section will be added soon!

Step-by-step recipe to add a model to 🤗 Transformers
----------------------------------------------------

Everyone has different preferences for how to port a model, so it can be very helpful for you to take a look at summaries of how other contributors ported models to Hugging Face. Here is a list of community blog posts on how to port a model:

1. [Porting GPT2 Model](https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28) by [Thomas](https://huggingface.co/thomwolf)
2. [Porting WMT19 MT Model](https://huggingface.co/blog/porting-fsmt) by [Stas](https://huggingface.co/stas)

From experience, we can tell you that the most important things to keep in mind when adding a model are:

- Don't reinvent the wheel! Most parts of the code you will add for the new 🤗 Transformers model already exist somewhere in 🤗 Transformers. Take some time to find similar, already existing models and tokenizers you can copy from. [grep](https://www.gnu.org/software/grep/) and [rg](https://github.com/BurntSushi/ripgrep) are your friends. Note that it might very well happen that your model's tokenizer is based on one model implementation, and your model's modeling code on another one. *E.g.*, FSMT's modeling code is based on BART, while FSMT's tokenizer code is based on XLM.
- It's more of an engineering challenge than a scientific challenge. You should spend more time on creating an efficient debugging environment than trying to understand all theoretical aspects of the model in the paper.
- Ask for help when you're stuck! Models are the core component of 🤗 Transformers, so we, at Hugging Face, are more than happy to help you at every step to add your model. Don't hesitate to ask if you notice you are not making progress.

In the following, we try to give you a general recipe that we found most useful when porting a model to 🤗 Transformers.

The following list is a summary of everything that has to be done to add a model and can be used by you as a To-Do List:

1. [ ] (Optional) Understood theoretical aspects
2. [ ] Prepared transformers dev environment
3. [ ] Set up debugging environment of the original repository
4. [ ] Created script that successfully runs forward pass using original repository and checkpoint
5. [ ] Successfully opened a PR and added the model skeleton to Transformers
6. [ ] Successfully converted original checkpoint to Transformers checkpoint
7. [ ] Successfully ran forward pass in Transformers that gives identical output to original checkpoint
8. [ ] Finished model tests in Transformers
9. [ ] Successfully added Tokenizer in Transformers
10. [ ] Ran end-to-end integration tests
11. [ ] Finished docs
12. [ ] Uploaded model weights to the hub
13. [ ] Submitted the pull request for review
14. [ ] (Optional) Added a demo notebook

To begin with, we usually recommend starting by getting a good theoretical understanding of `[camelcase name of model]`. However, if you prefer to understand the theoretical aspects of the model *on-the-job*, then it is totally fine to directly dive into the `[camelcase name of model]`'s code-base. This option might suit you better if your engineering skills are better than your theoretical skills, if you have trouble understanding `[camelcase name of model]`'s paper, or if you just enjoy programming much more than reading scientific papers.

### 1. (Optional) Theoretical aspects of [camelcase name of model]

You should take some time to read *[camelcase name of model]'s* paper, if such descriptive work exists. There might be large sections of the paper that are difficult to understand. If this is the case, this is fine - don't worry! The goal is not to get a deep theoretical understanding of the paper, but to extract the necessary information required to effectively re-implement the model in 🤗 Transformers. That being said, you don't have to spend too much time on the theoretical aspects, but rather focus on the practical ones, namely:

- What type of model is *[camelcase name of model]*? BERT-like encoder-only model? GPT2-like decoder-only model? BART-like encoder-decoder model? Look at the `model_summary` if you're not familiar with the differences between those.
- What are the applications of *[camelcase name of model]*? Text classification? Text generation? Seq2Seq tasks, *e.g.,* summarization?
- What is the novel feature of the model making it different from BERT/GPT-2/BART?
- Which of the already existing [🤗 Transformers models](https://huggingface.co/transformers/#contents) is most similar to *[camelcase name of model]*?
- What type of tokenizer is used? A sentencepiece tokenizer? Word piece tokenizer? Is it the same tokenizer as used for BERT or BART?

After you feel like you have gotten a good overview of the architecture of the model, you might want to write to [name of mentor] with any questions you might have.
This might include questions regarding the model's architecture, its attention layer, etc. We will be more than happy to help you.

#### Additional resources

Before diving into the code, here are some additional resources that might be worth taking a look at:

- [link 1]
- [link 2]
- [link 3]
- ...

#### Make sure you've understood the fundamental aspects of [camelcase name of model]

Alright, now you should be ready to take a closer look into the actual code of [camelcase name of model]. You should have understood the following aspects of [camelcase name of model] by now:

- [characteristic 1 of [camelcase name of model]]
- [characteristic 2 of [camelcase name of model]]
- ...

If any of the mentioned aspects above are **not** clear to you, now is a great time to talk to [name of mentor].

### 2. Next prepare your environment

1. Fork the [repository](https://github.com/huggingface/transformers) by clicking on the 'Fork' button on the repository's page. This creates a copy of the code under your GitHub user account.

2. Clone your `transformers` fork to your local disk, and add the base repository as a remote:

```bash
git clone https://github.com/[your Github handle]/transformers.git
cd transformers
git remote add upstream https://github.com/huggingface/transformers.git
```

3. Set up a development environment, for instance by running the following command:

```bash
python -m venv .env
source .env/bin/activate
pip install -e ".[dev]"
```

and return to the parent directory

```bash
cd ..
```

4. We recommend adding the PyTorch version of *[camelcase name of model]* to Transformers. To install PyTorch, please follow the instructions [here](https://pytorch.org/get-started/locally/).

**Note:** You don't need to have CUDA installed. Making the new model work on CPU is sufficient.

5. To port *[camelcase name of model]*, you will also need access to its original repository:

```bash
git clone [link to original repo].git
cd [lowercase name of model]
pip install -e .
```

Now you have set up a development environment to port *[camelcase name of model]* to 🤗 Transformers.

### Run a pretrained checkpoint using the original repository

**3. Set up debugging environment**

At first, you will work on the original *[camelcase name of model]* repository. Often, the original implementation is very "researchy", meaning that documentation might be lacking and the code can be difficult to understand. But this should be exactly your motivation to reimplement *[camelcase name of model]*. At Hugging Face, one of our main goals is to *make people stand on the shoulders of giants*, which translates here very well into taking a working model and rewriting it to make it as **accessible, user-friendly, and beautiful** as possible. This is the number-one motivation to re-implement models into 🤗 Transformers - trying to make complex new NLP technology accessible to **everybody**.

You should therefore start by diving into the [original repository]([link to original repo]). Successfully running the official pretrained model in the original repository is often **the most difficult** step. From our experience, it is very important to spend some time getting familiar with the original code-base. You need to figure out the following:

- Where to find the pretrained weights?
- How to load the pretrained weights into the corresponding model?
- How to run the tokenizer independently from the model?
- Trace one forward pass so that you know which classes and functions are required for a simple forward pass.
Usually, you only have to reimplement those functions.
- Be able to locate the important components of the model: Where is the model's class? Are there model sub-classes, *e.g.*, EncoderModel, DecoderModel? Where is the self-attention layer? Are there multiple different attention layers, *e.g.*, *self-attention*, *cross-attention*...?
- How can you debug the model in the original environment of the repo? Do you have to add `print` statements, can you work with an interactive debugger like [ipdb](https://pypi.org/project/ipdb/), or should you use an efficient IDE to debug the model, like PyCharm?

It is very important that, before you start the porting process, you can **efficiently** debug code in the original repository! Also, remember that you are working with an open-source library, so do not hesitate to open an issue, or even a pull request, in the original repository. The maintainers of this repository are most likely very happy about someone looking into their code!

At this point, it is really up to you which debugging environment and strategy you prefer to use to debug the original model. We strongly advise against setting up a costly GPU environment; simply work on a CPU, both when starting to dive into the original repository and when starting to write the 🤗 Transformers implementation of the model. Only at the very end, when the model has already been successfully ported to 🤗 Transformers, should you verify that the model also works as expected on GPU.

In general, there are two possible debugging environments for running the original model:

- [Jupyter notebooks](https://jupyter.org/) / [google colab](https://colab.research.google.com/notebooks/intro.ipynb)
- Local python scripts.

Jupyter notebooks have the advantage that they allow for cell-by-cell execution, which can be helpful to better split logical components from one another and to have faster debugging cycles, as intermediate results can be stored. Also, notebooks are often easier to share with other contributors, which might be very helpful if you want to ask the Hugging Face team for help. If you are familiar with Jupyter notebooks, we strongly recommend working with them.

The obvious disadvantage of Jupyter notebooks is that if you are not used to working with them, you will have to spend some time adjusting to the new programming environment, and you might not be able to use your known debugging tools anymore, like `ipdb`.

**4. Successfully run forward pass**

For each code-base, a good first step is always to load a **small** pretrained checkpoint and to be able to reproduce a single forward pass using a dummy integer vector of input IDs as an input. Such a script could look like this (in pseudocode):

```python
model = [camelcase name of model]Model.load_pretrained_checkpoint("/path/to/checkpoint/")
input_ids = [0, 4, 5, 2, 3, 7, 9]  # vector of input ids
original_output = model.predict(input_ids)
```

Next, regarding the debugging strategy, there are generally a few to choose from:

- Decompose the original model into many small testable components and run a forward pass on each of those for verification
- Decompose the original model only into the original *tokenizer* and the original *model*, run a forward pass on those, and use intermediate print statements or breakpoints for verification

Again, it is up to you which strategy to choose. Often, one or the other is advantageous depending on the original code base.
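If the original model happens to be a PyTorch `nn.Module`, forward hooks are one lightweight way to capture intermediate outputs for either strategy without restructuring the original code. The sketch below is illustrative only: `OriginalModel`, its loading API, and the layer-name filter are placeholders for whatever the original repository actually exposes.

```python
import torch

captured = {}  # module name -> intermediate output

def make_hook(name):
    def hook(module, inputs, output):
        captured[name] = output
    return hook

model = OriginalModel.load_pretrained_checkpoint("/path/to/checkpoint/")  # placeholder API
model.eval()  # disable dropout so that the forward pass is deterministic

# Register a hook on every sub-module whose output you want to compare later.
for name, module in model.named_modules():
    if "attention" in name:  # adapt the filter to the original model's naming
        module.register_forward_hook(make_hook(name))

with torch.no_grad():
    output = model(torch.tensor([[0, 4, 5, 2, 3, 7, 9]]))
# `captured` now maps module names to intermediate tensors.
```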
If the original code-base allows you to decompose the model into smaller sub-components, *e.g.*, if the original code-base can easily be run in eager mode, it is usually worth the effort to do so. There are some important advantages to taking the more difficult road in the beginning:

- at a later stage, when comparing the original model to the Hugging Face implementation, you can verify automatically for each component individually that the corresponding component of the 🤗 Transformers implementation matches, instead of relying on visual comparison via print statements
- it can give you some rope to decompose the big problem of porting a model into smaller problems of just porting individual components and thus structure your work better
- separating the model into logical meaningful components will help you to get a better overview of the model's design and thus to better understand the model
- at a later stage, those component-by-component tests help you to ensure that no regression occurs as you continue changing your code

[Lysandre's](https://gist.github.com/LysandreJik/db4c948f6b4483960de5cbac598ad4ed) integration checks for ELECTRA give a nice example of how this can be done.

However, if the original code-base is very complex or only allows intermediate components to be run in a compiled mode, it might be too time-consuming or even impossible to separate the model into smaller testable sub-components. A good example is [T5's MeshTensorFlow](https://github.com/tensorflow/mesh/tree/master/mesh_tensorflow) library, which is very complex and does not offer a simple way to decompose the model into its sub-components. For such libraries, one often relies on verifying print statements.

No matter which strategy you choose, the recommended procedure is often the same: you should start to debug the starting layers first and the ending layers last. It is recommended that you retrieve the output, either by print statements or sub-component functions, of the following layers in the following order:

1. Retrieve the input IDs passed to the model
2. Retrieve the word embeddings
3. Retrieve the input of the first Transformer layer
4. Retrieve the output of the first Transformer layer
5. Retrieve the output of the following n - 1 Transformer layers
6. Retrieve the output of the whole [camelcase name of model] Model

Input IDs should consist of an array of integers, *e.g.*, `input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]`

The outputs of the following layers often consist of multi-dimensional float arrays and can look like this:

```bash
[[
 [-0.1465, -0.6501,  0.1993,  ...,  0.1451,  0.3430,  0.6024],
 [-0.4417, -0.5920,  0.3450,  ..., -0.3062,  0.6182,  0.7132],
 [-0.5009, -0.7122,  0.4548,  ..., -0.3662,  0.6091,  0.7648],
 ...,
 [-0.5613, -0.6332,  0.4324,  ..., -0.3792,  0.7372,  0.9288],
 [-0.5416, -0.6345,  0.4180,  ..., -0.3564,  0.6992,  0.9191],
 [-0.5334, -0.6403,  0.4271,  ..., -0.3339,  0.6533,  0.8694]]],
```

We expect that every model added to 🤗 Transformers passes a couple of integration tests, meaning that the original model and the reimplemented version in 🤗 Transformers have to give the exact same output up to a precision of 0.001! Since it is normal that the exact same model written in different libraries can give a slightly different output depending on the library framework, we accept an error tolerance of 1e-3 (0.001). It is not enough if the model gives nearly the same output; the outputs have to be almost identical.
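Such a check is easy to automate once both outputs are available as tensors. A minimal sketch, assuming both models have already been run and their outputs converted to `torch` tensors of the same shape (the function name is our own):

```python
import torch

def check_outputs(original_output: torch.Tensor, hf_output: torch.Tensor, atol: float = 1e-3):
    """Raise if the ported model's output drifts beyond the accepted 1e-3 tolerance."""
    max_diff = (original_output - hf_output).abs().max().item()
    if not torch.allclose(original_output, hf_output, atol=atol):
        raise ValueError(f"Outputs differ: max absolute difference {max_diff:.2e} > {atol}")
    print(f"Outputs match (max absolute difference: {max_diff:.2e})")
```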
Therefore, you will certainly compare the intermediate outputs of the 🤗 Transformers version multiple times against the intermediate outputs of the original implementation of *[camelcase name of model]*, in which case an **efficient** debugging environment of the original repository is absolutely important. Here is some advice to make your debugging environment as efficient as possible.

- Find the best way of debugging intermediate results. Is the original repository written in PyTorch? Then you should probably take the time to write a longer script that decomposes the original model into smaller sub-components to retrieve intermediate values. Is the original repository written in Tensorflow 1? Then you might have to rely on TensorFlow print operations like [tf.print](https://www.tensorflow.org/api_docs/python/tf/print) to output intermediate values. Is the original repository written in Jax? Then make sure that the model is **not jitted** when running the forward pass, *e.g.*, check out [this link](https://github.com/google/jax/issues/196).
- Use the smallest pretrained checkpoint you can find. The smaller the checkpoint, the faster your debug cycle becomes. It is not efficient if your pretrained model is so big that your forward pass takes more than 10 seconds. In case only very large checkpoints are available, it might make more sense to create a dummy model in the new environment with randomly initialized weights and save those weights for comparison with the 🤗 Transformers version of your model.
- Make sure you are using the easiest way of calling a forward pass in the original repository. Ideally, you want to find the function in the original repository that **only** calls a single forward pass, *i.e.* that is often called `predict`, `evaluate`, `forward` or `__call__`. You don't want to debug a function that calls `forward` multiple times, *e.g.*, to generate text, like `autoregressive_sample` or `generate`.
- Try to separate the tokenization from the model's forward pass. If the original repository shows examples where you have to input a string, then try to find out where in the forward call the string input is changed to input ids and start from this point. This might mean that you have to write a small script yourself or change the original code so that you can directly input the ids instead of an input string.
- Make sure that the model in your debugging setup is **not** in training mode, which often causes the model to yield random outputs due to multiple dropout layers in the model. Make sure that the forward pass in your debugging environment is **deterministic** so that the dropout layers are not used. Or use `transformers.utils.set_seed` if the old and new implementations are in the same framework.

#### More details on how to create a debugging environment for [camelcase name of model]

[TODO FILL: Here the mentor should add very specific information on what the student should do]
[to set up an efficient environment for the special requirements of this model]

### Port [camelcase name of model] to 🤗 Transformers

Next, you can finally start adding new code to 🤗 Transformers. Go into the clone of your 🤗 Transformers' fork:

    cd transformers

In the special case that you are adding a model whose architecture exactly matches the model architecture of an existing model, you only have to add a conversion script as described in [this section](#write-a-conversion-script). In this case, you can just re-use the whole model architecture of the already existing model.
Otherwise, let's start generating a new model with the amazing Cookiecutter!

**Use the Cookiecutter to automatically generate the model's code**

To begin with, head over to the [🤗 Transformers templates](https://github.com/huggingface/transformers/tree/main/templates/adding_a_new_model) to make use of our `cookiecutter` implementation to automatically generate all the relevant files for your model. Again, we recommend only adding the PyTorch version of the model at first. Make sure you follow the instructions of the `README.md` on the [🤗 Transformers templates](https://github.com/huggingface/transformers/tree/main/templates/adding_a_new_model) carefully.

**Open a Pull Request on the main huggingface/transformers repo**

Before starting to adapt the automatically generated code, now is the time to open a "Work in progress (WIP)" pull request, *e.g.*, "\[WIP\] Add *[camelcase name of model]*", in 🤗 Transformers so that you and the Hugging Face team can work side-by-side on integrating the model into 🤗 Transformers. You should do the following:

1. Create a branch with a descriptive name from your main branch

```bash
git checkout -b add_[lowercase name of model]
```

2. Commit the automatically generated code:

```bash
git add .
git commit
```

3. Fetch and rebase to current main

```bash
git fetch upstream
git rebase upstream/main
```

4. Push the changes to your account using:

```bash
git push -u origin add_[lowercase name of model]
```

5. Once you are satisfied, go to the webpage of your fork on GitHub. Click on "Pull request". Make sure to add the GitHub handle of [name of mentor] as a reviewer, so that the Hugging Face team gets notified of future changes.

6. Change the PR into a draft by clicking on "Convert to draft" on the right of the GitHub pull request web page.

In the following, whenever you have made some progress, don't forget to commit your work and push it to your account so that it shows in the pull request. Additionally, you should make sure to update your work with the current main from time to time by doing:

```bash
git fetch upstream
git merge upstream/main
```

In general, all questions you might have regarding the model or your implementation should be asked in your PR and discussed/solved in the PR. This way, [name of mentor] will always be notified when you are committing new code or if you have a question. It is often very helpful to point [name of mentor] to your added code so that the Hugging Face team can efficiently understand your problem or question. To do so, you can go to the "Files changed" tab where you see all of your changes, go to a line regarding which you want to ask a question, and click on the "+" symbol to add a comment. Whenever a question or problem has been solved, you can click on the "Resolve" button of the created comment.

In the same way, [name of mentor] will open comments when reviewing your code. We recommend asking most questions on GitHub on your PR. For some very general questions that are not very useful for the public, feel free to ping [name of mentor] by Slack or email.

**5. Adapt the generated models code for [camelcase name of model]**

At first, we will focus only on the model itself and not care about the tokenizer. All the relevant code should be found in the generated files `src/transformers/models/[lowercase name of model]/modeling_[lowercase name of model].py` and `src/transformers/models/[lowercase name of model]/configuration_[lowercase name of model].py`.

Now you can finally start coding :).
The generated code in `src/transformers/models/[lowercase name of model]/modeling_[lowercase name of model].py` will either have the same architecture as BERT if it's an encoder-only model or BART if it's an encoder-decoder model. At this point, you should remind yourself what you've learned in the beginning about the theoretical aspects of the model: *How is the model different from BERT or BART?* Implement those changes, which often means changing the *self-attention* layer, the order of the normalization layer, etc... Again, it is often useful to look at the similar architecture of already existing models in Transformers to get a better feeling of how your model should be implemented.

**Note** that at this point, you don't have to be very sure that your code is fully correct or clean. Rather, it is advised to add a first *unclean*, copy-pasted version of the original code to `src/transformers/models/[lowercase name of model]/modeling_[lowercase name of model].py` until you feel like all the necessary code is added. From our experience, it is much more efficient to quickly add a first version of the required code and improve/correct the code iteratively with the conversion script as described in the next section. The only thing that has to work at this point is that you can instantiate the 🤗 Transformers implementation of *[camelcase name of model]*, *i.e.* the following command should work:

```python
from transformers import [camelcase name of model]Model, [camelcase name of model]Config

model = [camelcase name of model]Model([camelcase name of model]Config())
```

The above command will create a model according to the default parameters as defined in `[camelcase name of model]Config()` with random weights, thus making sure that the `init()` methods of all components work.

[TODO FILL: Here the mentor should add very specific information on what exactly has to be changed for this model]
[...]
[...]

**6. Write a conversion script**

Next, you should write a conversion script that lets you convert the checkpoint you used to debug *[camelcase name of model]* in the original repository to a checkpoint compatible with your just created 🤗 Transformers implementation of *[camelcase name of model]*. It is not advised to write the conversion script from scratch, but rather to look through already existing conversion scripts in 🤗 Transformers for one that has been used to convert a similar model that was written in the same framework as *[camelcase name of model]*. Usually, it is enough to copy an already existing conversion script and slightly adapt it for your use case. Don't hesitate to ask [name of mentor] to point you to a similar already existing conversion script for your model.

- If you are porting a model from TensorFlow to PyTorch, a good starting point might be BERT's conversion function [here](https://github.com/huggingface/transformers/blob/7acfa95afb8194f8f9c1f4d2c6028224dbed35a2/src/transformers/models/bert/modeling_bert.py#L91)
- If you are porting a model from PyTorch to PyTorch, a good starting point might be BART's conversion script [here](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py)

In the following, we'll quickly explain how PyTorch models store layer weights and define layer names. In PyTorch, the name of a layer is defined by the name of the class attribute you give the layer.
Let's define a dummy model in PyTorch, called `SimpleModel`, as follows:

```python
from torch import nn


class SimpleModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(10, 10)
        self.intermediate = nn.Linear(10, 10)
        self.layer_norm = nn.LayerNorm(10)
```

Now we can create an instance of this model definition which will fill all weights: `dense`, `intermediate`, `layer_norm` with random weights. We can print the model to see its architecture

```python
model = SimpleModel()

print(model)
```

This will print out the following:

```bash
SimpleModel(
  (dense): Linear(in_features=10, out_features=10, bias=True)
  (intermediate): Linear(in_features=10, out_features=10, bias=True)
  (layer_norm): LayerNorm((10,), eps=1e-05, elementwise_affine=True)
)
```

We can see that the layer names are defined by the name of the class attribute in PyTorch. You can print out the weight values of a specific layer:

```python
print(model.dense.weight.data)
```

to see that the weights were randomly initialized

```bash
tensor([[-0.0818,  0.2207, -0.0749, -0.0030,  0.0045, -0.1569, -0.1598,  0.0212, -0.2077,  0.2157],
        [ 0.1044,  0.0201,  0.0990,  0.2482,  0.3116,  0.2509,  0.2866, -0.2190,  0.2166, -0.0212],
        [-0.2000,  0.1107, -0.1999, -0.3119,  0.1559,  0.0993,  0.1776, -0.1950, -0.1023, -0.0447],
        [-0.0888, -0.1092,  0.2281,  0.0336,  0.1817, -0.0115,  0.2096,  0.1415, -0.1876, -0.2467],
        [ 0.2208, -0.2352, -0.1426, -0.2636, -0.2889, -0.2061, -0.2849, -0.0465,  0.2577,  0.0402],
        [ 0.1502,  0.2465,  0.2566,  0.0693,  0.2352, -0.0530,  0.1859, -0.0604,  0.2132,  0.1680],
        [ 0.1733, -0.2407, -0.1721,  0.1484,  0.0358, -0.0633, -0.0721, -0.0090,  0.2707, -0.2509],
        [-0.1173,  0.1561,  0.2945,  0.0595, -0.1996,  0.2988, -0.0802,  0.0407,  0.1829, -0.1568],
        [-0.1164, -0.2228, -0.0403,  0.0428,  0.1339,  0.0047,  0.1967,  0.2923,  0.0333, -0.0536],
        [-0.1492, -0.1616,  0.1057,  0.1950, -0.2807, -0.2710, -0.1586,  0.0739,  0.2220,  0.2358]])
```

In the conversion script, you should fill those randomly initialized weights with the exact weights of the corresponding layer in the checkpoint. *E.g.*,

```python
# retrieve matching layer weights, e.g. by
# recursive algorithm
layer_name = "dense"
pretrained_weight = array_of_dense_layer

model_pointer = getattr(model, "dense")

model_pointer.weight.data = torch.from_numpy(pretrained_weight)
```

While doing so, you must verify that each randomly initialized weight of your PyTorch model and its corresponding pretrained checkpoint weight exactly match in both **shape and name**. To do so, it is **necessary** to add assert statements for the shape and print out the names of the checkpoint weights. *E.g.*, you should add statements like:

```python
assert (
    model_pointer.weight.shape == pretrained_weight.shape
), f"Pointer shape of random weight {model_pointer.weight.shape} and array shape of checkpoint weight {pretrained_weight.shape} mismatched"
```

Besides, you should also print out the names of both weights to make sure they match, *e.g.*,

```python
logger.info(f"Initialize PyTorch weight {layer_name} from {pretrained_weight.name}")
```

If either the shape or the name doesn't match, you probably assigned the wrong checkpoint weight to a randomly initialized layer of the 🤗 Transformers implementation. An incorrect shape is most likely due to an incorrect setting of the config parameters in `[camelcase name of model]Config()` that do not exactly match those that were used for the checkpoint you want to convert. However, it could also be that PyTorch's implementation of a layer requires the weight to be transposed beforehand.
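To make the pattern above concrete, here is a hedged sketch of a complete conversion loop. It assumes the original checkpoint has already been loaded into a flat dictionary mapping layer names to NumPy arrays; `checkpoint_state` and its keys are hypothetical, and the snippet re-uses the `SimpleModel` defined above:

```python
import numpy as np
import torch

# hypothetical checkpoint: a flat dict mapping layer names to numpy arrays
checkpoint_state = {
    "dense.weight": np.random.randn(10, 10).astype(np.float32),
    "dense.bias": np.random.randn(10).astype(np.float32),
}

model = SimpleModel()  # the dummy model defined above

for layer_name, pretrained_weight in checkpoint_state.items():
    # resolve e.g. "dense.weight" to the corresponding parameter of the model
    model_pointer = model
    for attribute in layer_name.split("."):
        model_pointer = getattr(model_pointer, attribute)

    # the shapes must match exactly before the checkpoint weight is assigned
    assert model_pointer.shape == pretrained_weight.shape, (
        f"Shape of {layer_name} mismatched: {model_pointer.shape} vs {pretrained_weight.shape}"
    )
    print(f"Initialize PyTorch weight {layer_name}")
    model_pointer.data = torch.from_numpy(pretrained_weight)
```

A real conversion script additionally has to map the original layer names to the 🤗 Transformers names and possibly transpose some weights, as described above.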
Finally, you should also check that **all** required weights are initialized and print out all checkpoint weights that were not used for initialization to make sure the model is correctly converted. It is completely normal that the conversion trials fail with either a wrong shape statement or a wrong name assignment. This is most likely because either you used incorrect parameters in `[camelcase name of model]Config()`, have a wrong architecture in the 🤗 Transformers implementation, have a bug in the `init()` functions of one of the components of the 🤗 Transformers implementation, or need to transpose one of the checkpoint weights.

This step should be iterated with the previous step until all weights of the checkpoint are correctly loaded in the Transformers model. Having correctly loaded the checkpoint into the 🤗 Transformers implementation, you can then save the model under a folder of your choice `/path/to/converted/checkpoint/folder` that should then contain both a `pytorch_model.bin` file and a `config.json` file:

```python
model.save_pretrained("/path/to/converted/checkpoint/folder")
```

[TODO FILL: Here the mentor should add very specific information on what exactly has to be done for the conversion of this model]
[...]
[...]

**7. Implement the forward pass**

Having managed to correctly load the pretrained weights into the 🤗 Transformers implementation, you should now make sure that the forward pass is correctly implemented. In [Get familiar with the original repository](#34-run-a-pretrained-checkpoint-using-the-original-repository), you have already created a script that runs a forward pass of the model using the original repository. Now you should write an analogous script using the 🤗 Transformers implementation instead of the original one. It should look as follows:

[TODO FILL: Here the model name might have to be adapted, *e.g.*, maybe [camelcase name of model]ForConditionalGeneration instead of [camelcase name of model]Model]

```python
model = [camelcase name of model]Model.from_pretrained("/path/to/converted/checkpoint/folder")
input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]
output = model(input_ids).last_hidden_state
```

It is very likely that the 🤗 Transformers implementation and the original model implementation don't give the exact same output the very first time or that the forward pass throws an error. Don't be disappointed - it's expected! First, you should make sure that the forward pass doesn't throw any errors. It often happens that the wrong dimensions are used, leading to a `"Dimensionality mismatch"` error, or that the wrong data type object is used, *e.g.*, `torch.long` instead of `torch.float32`. Don't hesitate to ask [name of mentor] for help if you don't manage to solve certain errors.

The final part to make sure the 🤗 Transformers implementation works correctly is to ensure that the outputs are equivalent to a precision of `1e-3`. First, you should ensure that the output shapes are identical, *i.e.* `outputs.shape` should yield the same value for the script of the 🤗 Transformers implementation and the original implementation. Next, you should make sure that the output values are identical as well. This is one of the most difficult parts of adding a new model. A minimal parity check could look like the sketch below.
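In this sketch, `original_output` and `output` are placeholders standing in for the tensors returned by the two forward-pass scripts:

```python
import torch

# placeholder tensors; in practice these come from the two forward-pass scripts
original_output = torch.zeros(1, 9, 1024)
output = torch.zeros(1, 9, 1024)

# the shapes must be identical
assert output.shape == original_output.shape, f"Shape mismatch: {output.shape} vs {original_output.shape}"

# the values must agree to a tolerance of 1e-3
max_diff = (output - original_output).abs().max().item()
print(f"Max absolute difference: {max_diff:.2e}")
assert torch.allclose(output, original_output, atol=1e-3), "Outputs differ by more than 1e-3"
```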
Common reasons why the outputs are not identical are:

- Some layers were not added, *i.e.* an activation layer was not added, or the residual connection was forgotten
- The word embedding matrix was not tied
- The wrong positional embeddings are used because the original implementation uses an offset
- Dropout is applied during the forward pass. To fix this make sure `model.training is False` and that no dropout layer is falsely activated during the forward pass, *i.e.* pass `self.training` to [PyTorch's functional dropout](https://pytorch.org/docs/stable/nn.functional.html?highlight=dropout#torch.nn.functional.dropout)

The best way to fix the problem is usually to look at the forward pass of the original implementation and the 🤗 Transformers implementation side-by-side and check if there are any differences. Ideally, you should debug/print out intermediate outputs of both implementations of the forward pass to find the exact position in the network where the 🤗 Transformers implementation shows a different output than the original implementation. First, make sure that the hard-coded `input_ids` in both scripts are identical. Next, verify that the outputs of the first transformation of the `input_ids` (usually the word embeddings) are identical. And then work your way up to the very last layer of the network. At some point, you will notice a difference between the two implementations, which should point you to the bug in the 🤗 Transformers implementation. From our experience, a simple and efficient way is to add many print statements in both the original implementation and the 🤗 Transformers implementation, at the same positions in the network respectively, and to successively remove print statements showing the same values for intermediate representations.

When you're confident that both implementations yield the same output, verify the outputs with `torch.allclose(original_output, output, atol=1e-3)`, and you're done with the most difficult part! Congratulations - the work left to be done should be a cakewalk 😊.

**8. Adding all necessary model tests**

At this point, you have successfully added a new model. However, it is very much possible that the model does not yet fully comply with the required design. To make sure the implementation is fully compatible with 🤗 Transformers, all common tests should pass. The Cookiecutter should have automatically added a test file for your model, probably under `tests/test_modeling_[lowercase name of model].py`. Run this test file to verify that all common tests pass:

```bash
pytest tests/test_modeling_[lowercase name of model].py
```

[TODO FILL: Here the mentor should add very specific information on what tests are likely to fail after having implemented the model, e.g. given the model, it might be very likely that `test_attention_output` fails]
[...]
[...]

Having fixed all common tests, it is now crucial to ensure that all the nice work you have done is well tested, so that

- a) The community can easily understand your work by looking at specific tests of *[camelcase name of model]*
- b) Future changes to your model will not break any important feature of the model.

At first, integration tests should be added. Those integration tests essentially do the same as the debugging scripts you used earlier to implement the model in 🤗 Transformers. A template of those model tests is already added by the Cookiecutter, called `[camelcase name of model]ModelIntegrationTests`, and only has to be filled out by you. A filled-out version could look like the sketch below.
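In this rough illustration, `BrandNewBert` stands in for the *[camelcase name of model]* placeholder, and the checkpoint name and expected values are made up; they would have to be replaced with values retrieved from the original implementation:

```python
import unittest

import torch

from transformers.testing_utils import require_torch, slow


@require_torch
class BrandNewBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        # hypothetical model class and checkpoint name, used only for illustration
        from transformers import BrandNewBertModel

        model = BrandNewBertModel.from_pretrained("author/brand-new-bert-base")
        input_ids = torch.tensor([[0, 4, 4, 3, 2, 4, 1, 7, 19]])

        with torch.no_grad():
            outputs = model(input_ids)

        # verify the output shape
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        # verify a slice of the output against values from the original implementation
        expected_slice = torch.tensor([[-0.1465, -0.6501, 0.1993]])  # placeholder values
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :1, :3], expected_slice, atol=1e-3))
```

The AST integration test later in this repository follows the same shape-then-slice pattern.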
To ensure that those tests are passing, run

```bash
RUN_SLOW=1 pytest -sv tests/test_modeling_[lowercase name of model].py::[camelcase name of model]ModelIntegrationTests
```

**Note:** In case you are using Windows, you should replace `RUN_SLOW=1` with `SET RUN_SLOW=1`

Second, all features that are special to *[camelcase name of model]* should be tested additionally in a separate test under `[camelcase name of model]ModelTester`/`[camelcase name of model]ModelTest`. This part is often forgotten but is extremely useful in two ways:

- It helps to transfer the knowledge you have acquired during the model addition to the community by showing how the special features of *[camelcase name of model]* should work.
- Future contributors can quickly test changes to the model by running those special tests.

[TODO FILL: Here the mentor should add very specific information on what special features of the model should be tested additionally]
[...]
[...]

**9. Implement the tokenizer**

Next, we should add the tokenizer of *[camelcase name of model]*. Usually, the tokenizer is equivalent or very similar to an already existing tokenizer of 🤗 Transformers.

[TODO FILL: Here the mentor should add a comment whether a new tokenizer is required or, if this is not the case, which existing tokenizer most closely resembles [camelcase name of model]'s tokenizer and how the tokenizer should be implemented]
[...]
[...]

It is very important to find/extract the original tokenizer file and to manage to load this file into the 🤗 Transformers implementation of the tokenizer.

For [camelcase name of model], the tokenizer files can be found here:

- [To be filled out by mentor]

and the 🤗 Transformers version of the tokenizer can be loaded as follows:

[To be filled out by mentor]

To ensure that the tokenizer works correctly, it is recommended to first create a script in the original repository that inputs a string and returns the `input_ids`. It could look similar to this (in pseudo-code):

```python
input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words."
model = [camelcase name of model]Model.load_pretrained_checkpoint("/path/to/checkpoint/")
input_ids = model.tokenize(input_str)
```

You might have to take a deeper look again into the original repository to find the correct tokenizer function, or you might even have to make changes to your clone of the original repository to only output the `input_ids`. Having written a functional tokenization script that uses the original repository, an analogous script for 🤗 Transformers should be created. It should look similar to this:

```python
from transformers import [camelcase name of model]Tokenizer

input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words."

tokenizer = [camelcase name of model]Tokenizer.from_pretrained("/path/to/tokenizer/folder/")

input_ids = tokenizer(input_str).input_ids
```

When both `input_ids` yield the same values, as a final step a tokenizer test file should also be added.

[TODO FILL: Here mentor should point the student to test files of similar tokenizers]

Analogous to the modeling test files of *[camelcase name of model]*, the tokenization test files of *[camelcase name of model]* should contain a couple of hard-coded integration tests.

[TODO FILL: Here mentor should again point to an existing similar test of another model that the student can copy & adapt]
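Once both scripts above have produced their ids, the hard-coded check itself can be as small as the following sketch (the values are placeholders):

```python
# placeholder values; in practice these are the ids produced by the two scripts above
original_input_ids = [13, 2872, 234, 12, 5]
hf_input_ids = [13, 2872, 234, 12, 5]

assert hf_input_ids == original_input_ids, (
    f"Tokenizer mismatch:\n  original: {original_input_ids}\n  transformers: {hf_input_ids}"
)
print("Tokenizers agree on this input.")
```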
**10. Run End-to-end integration tests**

Having added the tokenizer, you should also add a couple of end-to-end integration tests using both the model and the tokenizer to `tests/test_modeling_[lowercase name of model].py` in 🤗 Transformers. Such a test should show on a meaningful text-to-text sample that the 🤗 Transformers implementation works as expected. A meaningful text-to-text sample can include *e.g.* a source-to-target translation pair, an article-to-summary pair, a question-to-answer pair, etc... If none of the ported checkpoints has been fine-tuned on a downstream task, it is enough to simply rely on the model tests. In a final step to ensure that the model is fully functional, it is advised that you also run all tests on GPU. It can happen that you forgot to add some `.to(self.device)` statements to internal tensors of the model, which in such a test would show up as an error. In case you have no access to a GPU, the Hugging Face team can take care of running those tests for you.

**11. Add Docstring**

Now, all the necessary functionality for *[camelcase name of model]* is added - you're almost done! The only thing left to add is a nice docstring and a doc page. The Cookiecutter should have added a template file called `docs/source/model_doc/[lowercase name of model].rst` that you should fill out. Users of your model will usually first look at this page before using your model. Hence, the documentation must be understandable and concise. It is very useful for the community to add some *Tips* to show how the model should be used. Don't hesitate to ping [name of mentor] regarding the docstrings.

Next, make sure that the docstring added to `src/transformers/models/[lowercase name of model]/modeling_[lowercase name of model].py` is correct and includes all necessary inputs and outputs. It is always good to remind oneself that documentation should be treated at least as carefully as the code in 🤗 Transformers, since the documentation is usually the first contact point of the community with the model.

**Code refactor**

Great, now you have added all the necessary code for *[camelcase name of model]*. At this point, you should correct some potential incorrect code style by running:

```bash
make style
```

and verify that your coding style passes the quality check:

```bash
make quality
```

There are a couple of other very strict design tests in 🤗 Transformers that might still be failing, which shows up in the tests of your pull request. This is often because of some missing information in the docstring or some incorrect naming. [name of mentor] will surely help you if you're stuck here.

Lastly, it is always a good idea to refactor one's code after having ensured that the code works correctly. With all tests passing, now it's a good time to go over the added code again and do some refactoring.

You have now finished the coding part, congratulations! 🎉 You are awesome! 😎

**12. Upload the models to the model hub**

In this final part, you should convert and upload all checkpoints to the model hub and add a model card for each uploaded model checkpoint. You should work alongside [name of mentor] here to decide on a fitting name for each checkpoint and to get the required access rights to be able to upload the model under the author's organization of *[camelcase name of model]*. It is worth spending some time to create fitting model cards for each checkpoint.
The model cards should highlight the specific characteristics of this particular checkpoint, *e.g.*: On which dataset was the checkpoint pretrained/fine-tuned? On what down-stream task should the model be used? The card should also include some code showing how to correctly use the model.

**13. (Optional) Add notebook**

It is very helpful to add a notebook that showcases in detail how *[camelcase name of model]* can be used for inference and/or fine-tuned on a downstream task. This is not mandatory to merge your PR, but very useful for the community.

**14. Submit your finished PR**

You're done programming now and can move to the last step, which is getting your PR merged into main. Usually, [name of mentor] should have helped you already at this point, but it is worth taking some time to give your finished PR a nice description and possibly add comments to your code if you want to point out certain design choices to your reviewer.

### Share your work!!

Now, it's time to get some credit from the community for your work! Having completed a model addition is a major contribution to Transformers and the whole NLP community. Your code and the ported pre-trained models will certainly be used by hundreds and possibly even thousands of developers and researchers. You should be proud of your work and share your achievement with the community.

**You have made another model that is super easy to access for everyone in the community! 🤯**
transformers/templates/adding_a_new_model/ADD_NEW_MODEL_PROPOSAL_TEMPLATE.md/0
{ "file_path": "transformers/templates/adding_a_new_model/ADD_NEW_MODEL_PROPOSAL_TEMPLATE.md", "repo_id": "transformers", "token_count": 14136 }
415
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest import numpy as np from parameterized import parameterized from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers.generation import ( TFForcedBOSTokenLogitsProcessor, TFForcedEOSTokenLogitsProcessor, TFForceTokensLogitsProcessor, TFLogitsProcessorList, TFMinLengthLogitsProcessor, TFNoBadWordsLogitsProcessor, TFNoRepeatNGramLogitsProcessor, TFRepetitionPenaltyLogitsProcessor, TFSuppressTokensAtBeginLogitsProcessor, TFSuppressTokensLogitsProcessor, TFTemperatureLogitsWarper, TFTopKLogitsWarper, TFTopPLogitsWarper, ) from ..test_modeling_tf_common import ids_tensor @require_tf class TFLogitsProcessorTest(unittest.TestCase): def _get_uniform_logits(self, batch_size: int, length: int): scores = tf.ones((batch_size, length), dtype=tf.float32) / length return scores @parameterized.expand([(False,), (True,)]) def test_min_length_dist_processor(self, use_xla): vocab_size = 20 batch_size = 4 eos_token_id = 0 min_dist_processor = TFMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) if use_xla: min_dist_processor = tf.function(min_dist_processor, jit_compile=True) # check that min length is applied at length 5 cur_len = 5 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = min_dist_processor(input_ids, scores, cur_len) self.assertListEqual(scores_before_min_length[:, eos_token_id].numpy().tolist(), 4 * [-float("inf")]) # check that min length is not applied anymore at length 15 cur_len = 15 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = min_dist_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf(scores_before_min_length)).numpy()) @parameterized.expand([(False,), (True,)]) def test_temperature_dist_warper(self, use_xla): input_ids = None cur_len = None length = 20 scores = self._get_uniform_logits(batch_size=2, length=length) # tweak scores to not be uniform anymore scores = scores.numpy() scores[1, 5] = (1 / length) + 0.1 # peak, 1st batch scores[1, 10] = (1 / length) - 0.4 # valley, 1st batch scores = tf.convert_to_tensor(scores) # compute softmax probs = tf.nn.softmax(scores, axis=-1) temp_dist_warper_sharper = TFTemperatureLogitsWarper(temperature=0.5) temp_dist_warper_smoother = TFTemperatureLogitsWarper(temperature=1.3) if use_xla: temp_dist_warper_sharper = tf.function(temp_dist_warper_sharper, jit_compile=True) temp_dist_warper_smoother = tf.function(temp_dist_warper_smoother, jit_compile=True) warped_prob_sharp = tf.nn.softmax(temp_dist_warper_sharper(input_ids, tf.identity(scores), cur_len), axis=-1) warped_prob_smooth = tf.nn.softmax(temp_dist_warper_smoother(input_ids, tf.identity(scores), cur_len), axis=-1) # 
uniform distribution stays uniform tf.debugging.assert_near(probs[0, :], warped_prob_sharp[0, :], atol=1e-3) tf.debugging.assert_near(probs[0, :], warped_prob_smooth[0, :], atol=1e-3) # sharp peaks get higher, valleys get lower self.assertLess(tf.math.reduce_max(probs[1, :]), tf.math.reduce_max(warped_prob_sharp[1, :])) self.assertGreater(tf.math.reduce_min(probs[1, :]), tf.math.reduce_min(warped_prob_sharp[1, :])) # smooth peaks get lower, valleys get higher self.assertGreater(tf.math.reduce_max(probs[1, :]), tf.math.reduce_max(warped_prob_smooth[1, :])) self.assertLess(tf.math.reduce_min(probs[1, :]), tf.math.reduce_min(warped_prob_smooth[1, :])) @parameterized.expand([(False,), (True,)]) def test_repetition_penalty_dist_process(self, use_xla): vocab_size = 10 cur_len = 2 input_ids = tf.constant([[0, 1], [5, 0]], dtype=tf.int32) self.assertEqual(cur_len, input_ids.shape[1]) scores = self._get_uniform_logits(batch_size=2, length=vocab_size) mask = tf.cast(tf.constant([[1] + 9 * [0], 10 * [0]]), tf.bool) scores = tf.where(mask, -1 / vocab_size, scores) mask = tf.cast(tf.constant([10 * [0], 5 * [0] + [1] + 4 * [0]]), tf.bool) scores = tf.where(mask, 4 / vocab_size, scores) rep_penalty_proc = TFRepetitionPenaltyLogitsProcessor(penalty=2.0) if use_xla: rep_penalty_proc = tf.function(rep_penalty_proc, jit_compile=True) scores = rep_penalty_proc(input_ids, tf.identity(scores), cur_len) # check that values were correctly changed (negative scores for used tokens should increase, others # should decrease) self.assertAlmostEqual(scores[0, 0].numpy(), -(1 / vocab_size) * 2) self.assertAlmostEqual(scores[0, 1].numpy(), (1 / vocab_size) / 2) self.assertAlmostEqual(scores[0, 2].numpy(), (1 / vocab_size)) # unused tokens should see no change self.assertAlmostEqual(scores[1, 0].numpy(), (1 / vocab_size) / 2) self.assertAlmostEqual(scores[1, 5].numpy(), (4 / vocab_size) / 2) self.assertAlmostEqual(scores[0, 2].numpy(), (1 / vocab_size)) # unused tokens should see no change @parameterized.expand([(False,), (True,)]) def test_top_k_dist_warper(self, use_xla): input_ids = None cur_len = None vocab_size = 10 batch_size = 2 # create ramp distribution ramp_logits = np.broadcast_to(np.arange(vocab_size, dtype=np.float32), (batch_size, vocab_size)).copy() ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size top_k_warp = TFTopKLogitsWarper(3) if use_xla: top_k_warp = tf.function(top_k_warp, jit_compile=True) scores = top_k_warp(input_ids, ramp_logits, cur_len) # check that correct tokens are filtered self.assertListEqual(tf.math.is_inf(scores[0]).numpy().tolist(), 7 * [True] + 3 * [False]) self.assertListEqual(tf.math.is_inf(scores[1]).numpy().tolist(), 2 * [True] + 3 * [False] + 5 * [True]) # check special cases length = 5 logits = self._get_uniform_logits(batch_size=batch_size, length=length) top_k_warp_safety_check = TFTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3) if use_xla: top_k_warp_safety_check = tf.function(top_k_warp_safety_check, jit_compile=True) scores = top_k_warp_safety_check(input_ids, logits, cur_len) # uniform dist is not changed self.assertListEqual(tf.math.reduce_sum(tf.where(scores == 0.0, 1, 0), axis=-1).numpy().tolist(), [0, 0]) ramp_logits = np.broadcast_to(np.arange(length, dtype=np.float32), (batch_size, length)).copy() scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual(tf.math.reduce_sum(tf.where(scores == 0.0, 1, 0), 
axis=-1).numpy().tolist(), [2, 2]) @parameterized.expand([(False,), (True,)]) def test_top_p_dist_warper(self, use_xla): input_ids = None cur_len = None vocab_size = 10 batch_size = 2 # create distribution and take log (inverse to Softmax as taken in TFTopPLogitsWarper) dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]], dtype=np.float32)) # top_p should have been 0.8 to test the edge case of top_p being exactly equal to sum of some token prob # However, due to the numerical instability of softmax in TF we choose this as the edge case # top_p as 0.8 passes when use_xla is True and fails when False. Refer PR #18984. top_p_warp = TFTopPLogitsWarper(0.79999995) if use_xla: top_p_warp = tf.function(top_p_warp, jit_compile=True) filtered_dist = tf.exp(top_p_warp(input_ids, dist, cur_len)) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 EXPECTED_FILTERED_DIST = tf.constant([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]], dtype=tf.float32) tf.debugging.assert_near(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3) # check edge cases with negative and extreme logits ramp_logits = np.broadcast_to( np.arange(vocab_size, dtype=np.float32)[None, :], (batch_size, vocab_size) ).copy() - (vocab_size // 2) # make ramp_logits more extreme ramp_logits[1] = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept top_p_warp = TFTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0) if use_xla: top_p_warp = tf.function(top_p_warp, jit_compile=True) filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps # 2. self.assertListEqual( tf.math.reduce_sum(tf.where(filtered_dist != 0.0, 1, 0), axis=-1).numpy().tolist(), [3, 2] ) def test_no_repeat_ngram_dist_processor(self): vocab_size = 3 batch_size = 2 cur_len = 4 input_ids = tf.constant([[1, 1, 2, 1], [0, 1, 0, 1]], dtype=tf.int32) self.assertEqual(cur_len, input_ids.shape[1]) scores = self._get_uniform_logits(batch_size, vocab_size) no_repeat_proc_2_gram = TFNoRepeatNGramLogitsProcessor(2) no_repeat_proc_3_gram = TFNoRepeatNGramLogitsProcessor(3) filtered_scores_2_gram = no_repeat_proc_2_gram(input_ids, tf.identity(scores), cur_len) filtered_scores_3_gram = no_repeat_proc_3_gram(input_ids, tf.identity(scores), cur_len) # 2-gram would forbid 2nd and 3rd token (1,2) at 1st batch and 1st token (0) at 2nd batch self.assertListEqual( tf.math.is_inf(filtered_scores_2_gram).numpy().tolist(), [[False, True, True], [True, False, False]] ) # 3-gram would forbid no token at 1st batch and 1st token (0) at 2nd batch self.assertListEqual( tf.math.is_inf(filtered_scores_3_gram).numpy().tolist(), [[False, False, False], [True, False, False]] ) @parameterized.expand([(False,), (True,)]) def test_no_bad_words_dist_processor(self, use_xla): vocab_size = 5 batch_size = 2 eos_token_id = 4 cur_len = 4 input_ids = tf.constant([[0, 1, 3, 1], [0, 1, 0, 1]], dtype=tf.int32) self.assertEqual(cur_len, input_ids.shape[1]) bad_word_tokens = [[1], [4], [1, 0], [0, 1, 2], [1, 3, 1, 3]] scores = self._get_uniform_logits(batch_size, vocab_size) no_bad_words_dist_proc = TFNoBadWordsLogitsProcessor(bad_words_ids=bad_word_tokens, eos_token_id=eos_token_id) if use_xla: no_bad_words_dist_proc = tf.function(no_bad_words_dist_proc, jit_compile=True) filtered_scores = no_bad_words_dist_proc(input_ids, tf.identity(scores), cur_len) # batch 1: 1st, 2nd, and 4th (0, 1, 3) token are forbidden # batch 2: 1st, 2nd, and 3rd (0, 
1, 2) token are forbidden self.assertListEqual( tf.math.is_inf(filtered_scores).numpy().tolist(), [[True, True, False, True, True], [True, True, True, False, True]], ) @parameterized.expand([(False,), (True,)]) def test_forced_bos_token_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 bos_token_id = 0 logits_processor = TFForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) # check that all scores are -inf except the bos_token_id score cur_len = 1 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertTrue( tf.math.reduce_all(tf.math.is_inf(scores[:, bos_token_id + 1 :]) & (scores[:, bos_token_id + 1 :] < 0)) ) self.assertListEqual(scores[:, bos_token_id].numpy().tolist(), 4 * [0]) # score for bos_token_id should be zero # check that bos_token_id is not forced if current length is greater than 1 cur_len = 4 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf((scores)))) @parameterized.expand([(False,), (True,)]) def test_forced_eos_token_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 eos_token_id = 0 max_length = 5 logits_processor = TFForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) # check that all scores are -inf except the eos_token_id when max_length-1 is reached cur_len = 4 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertTrue( tf.math.reduce_all(tf.math.is_inf(scores[:, eos_token_id + 1 :]) & (scores[:, eos_token_id + 1 :] < 0)) ) self.assertListEqual( scores[:, eos_token_id].numpy().tolist(), 4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length-1 is not reached cur_len = 3 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf((scores)))) @parameterized.expand([(False,), (True,)]) def test_suppress_tokens_at_begin_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 begin_suppress_tokens = [1, 2, 3] begin_index = 5 logits_processor = TFSuppressTokensAtBeginLogitsProcessor( begin_suppress_tokens=begin_suppress_tokens, begin_index=begin_index ) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) # Check that no scores are suppressed if begin_index is not reached cur_len = 4 input_ids = tf.convert_to_tensor([[11, 17, 15, 8], [14, 0, 19, 5], [13, 11, 18, 19], [11, 12, 16, 15]]) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf((scores)))) # Check that scores are suppressed if begin_index is reached cur_len = 5 input_ids = tf.convert_to_tensor([[5, 5, 5, 0, 17], [18, 1, 9, 14, 17], [18, 6, 8, 15, 19], [8, 12, 17, 1, 2]]) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) 
self.assertTrue(tf.math.reduce_all(tf.math.is_inf(tf.gather(scores, begin_suppress_tokens, axis=1)))) @parameterized.expand([(False,), (True,)]) def test_suppress_tokens_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 suppress_tokens = [1, 3, 5] keep_tokens = [i for i in range(vocab_size) if i not in suppress_tokens] logits_processor = TFSuppressTokensLogitsProcessor(suppress_tokens=suppress_tokens) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) # Check that suppress_tokens are suppressed and others are not cur_len = 5 input_ids = tf.convert_to_tensor([[0, 10, 19, 6, 3], [17, 4, 8, 17, 2], [7, 1, 11, 6, 15], [5, 8, 13, 16, 0]]) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertTrue(tf.math.reduce_all(tf.math.is_inf(tf.gather(scores, suppress_tokens, axis=1)))) self.assertFalse(tf.math.reduce_any(tf.math.is_inf(tf.gather(scores, keep_tokens, axis=1)))) @parameterized.expand([(False,), (True,)]) def test_force_tokens_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 force_token_map = {1: 2, 3: 2} logits_processor = TFForceTokensLogitsProcessor(force_token_map=force_token_map) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) # check that if the cur_len is contained in the force_token_map, the logits are the same # for all tokens except the one the force_token_map points to cur_len = 1 input_ids = tf.convert_to_tensor([[11], [7], [5], [15]]) ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) tf.debugging.assert_near(tf.gather(scores, [force_token_map[cur_len]], axis=1), 0.0) non_forced_inds = [i for i in range(vocab_size) if i != force_token_map[cur_len]] self.assertTrue( tf.math.reduce_all( tf.experimental.numpy.isclose( tf.gather(scores, [non_forced_inds], axis=1), tf.constant(scores.dtype.min), ) ) ) # check that if the cur_len is not contained in the force_token_map, the logits are not modified cur_len = 2 input_ids = tf.convert_to_tensor([[2, 19], [19, 15], [4, 9], [7, 6]]) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf((scores)))) @parameterized.expand([(False,), (True,)]) def test_processor_list(self, use_xla): # TODO (Joao): reintroduce TFNoRepeatNGramLogitsProcessor when it gets compatible with XLA batch_size = 4 cur_len = 10 vocab_size = 15 eos_token_id = 0 # dummy input_ids and scores input_ids = ids_tensor((batch_size, cur_len), vocab_size) input_ids_comp = tf.identity(input_ids) scores = self._get_uniform_logits(batch_size, vocab_size) scores_comp = tf.identity(scores) # instantiate all dist processors min_dist_proc = TFMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) temp_dist_warp = TFTemperatureLogitsWarper(temperature=0.5) rep_penalty_proc = TFRepetitionPenaltyLogitsProcessor(penalty=2.0) top_k_warp = TFTopKLogitsWarper(3) top_p_warp = TFTopPLogitsWarper(0.8) # no_repeat_proc = TFNoRepeatNGramLogitsProcessor(2) no_bad_words_dist_proc = TFNoBadWordsLogitsProcessor(bad_words_ids=[[1]], eos_token_id=eos_token_id) if use_xla: min_dist_proc = tf.function(min_dist_proc, jit_compile=True) temp_dist_warp = tf.function(temp_dist_warp, jit_compile=True) rep_penalty_proc = tf.function(rep_penalty_proc, jit_compile=True) top_k_warp = tf.function(top_k_warp, jit_compile=True) top_p_warp = 
tf.function(top_p_warp, jit_compile=True) # no_repeat_proc = tf.function(no_repeat_proc, jit_compile=True) no_bad_words_dist_proc = tf.function(no_bad_words_dist_proc, jit_compile=True) # no processor list scores = min_dist_proc(input_ids, scores, cur_len) scores = temp_dist_warp(input_ids, scores, cur_len) scores = rep_penalty_proc(input_ids, scores, cur_len) scores = top_k_warp(input_ids, scores, cur_len) scores = top_p_warp(input_ids, scores, cur_len) # scores = no_repeat_proc(input_ids, scores, cur_len) scores = no_bad_words_dist_proc(input_ids, scores, cur_len) # with processor list processor = TFLogitsProcessorList( [ min_dist_proc, temp_dist_warp, rep_penalty_proc, top_k_warp, top_p_warp, # no_repeat_proc, no_bad_words_dist_proc, ] ) scores_comp = processor(input_ids, scores_comp, cur_len) # remove inf scores = tf.where(tf.math.is_inf(scores), -1e9, scores) scores_comp = tf.where(tf.math.is_inf(scores_comp), -1e9, scores_comp) # scores should be equal tf.debugging.assert_near(scores, scores_comp, atol=1e-3) # input_ids should never be changed self.assertListEqual(input_ids.numpy().tolist(), input_ids_comp.numpy().tolist())
transformers/tests/generation/test_tf_logits_process.py/0
{ "file_path": "transformers/tests/generation/test_tf_logits_process.py", "repo_id": "transformers", "token_count": 10091 }
416
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Audio Spectrogram Transformer (AST) model.""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class ASTModelTester: def __init__( self, parent, batch_size=13, patch_size=2, max_length=24, num_mel_bins=16, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, frequency_stride=2, time_stride=2, attn_implementation="eager", ): self.parent = parent self.batch_size = batch_size self.patch_size = patch_size self.max_length = max_length self.num_mel_bins = num_mel_bins self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.frequency_stride = frequency_stride self.time_stride = time_stride self.attn_implementation = attn_implementation # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1 num_patches = frequency_out_dimension * time_out_dimension self.seq_length = num_patches + 2 def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, input_values, labels def get_config(self): return ASTConfig( patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, 
hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride, attn_implementation=self.attn_implementation, ) def create_and_check_model(self, config, input_values, labels): model = ASTModel(config=config) model.to(torch_device) model.eval() result = model(input_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_values, labels, ) = config_and_inputs inputs_dict = {"input_values": input_values} return config, inputs_dict @require_torch class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as AST does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False # TODO: Fix the failed tests when this model gets more usage def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def setUp(self): self.model_tester = ASTModelTester(self) self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="AST does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "MIT/ast-finetuned-audioset-10-10-0.4593" model = ASTModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on some audio from AudioSet def prepare_audio(): filepath = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset" ) audio, sampling_rate = torchaudio.load(filepath) return audio, sampling_rate @require_torch @require_torchaudio class ASTModelIntegrationTest(unittest.TestCase): @cached_property def default_feature_extractor(self): return ( 
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593") if is_torchaudio_available() else None ) @slow def test_inference_audio_classification(self): feature_extractor = self.default_feature_extractor model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device) feature_extractor = self.default_feature_extractor audio, sampling_rate = prepare_audio() audio = audio.squeeze().numpy() inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 527)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
transformers/tests/models/audio_spectrogram_transformer/test_modeling_audio_spectrogram_transformer.py/0
{ "file_path": "transformers/tests/models/audio_spectrogram_transformer/test_modeling_audio_spectrogram_transformer.py", "repo_id": "transformers", "token_count": 3928 }
417
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import BertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin from ...utils.test_modeling_tf_core import TFCoreModelTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_MODEL_FOR_PRETRAINING_MAPPING from transformers.models.bert.modeling_tf_bert import ( TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertModel, ) class TFBertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_mask = True self.use_token_type_ids = True self.use_labels = True self.vocab_size = 99 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = BertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, 
attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFBertModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_causal_lm_base_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.is_decoder = True model = TFBertModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = TFBertModel(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states) # Also check the case where encoder outputs are not passed result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_causal_lm_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.is_decoder = True model = TFBertLMHeadModel(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } prediction_scores = model(inputs)["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size] ) def create_and_check_causal_lm_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, 
token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = TFBertLMHeadModel(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states) prediction_scores = result["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size] ) def create_and_check_causal_lm_model_past( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.is_decoder = True model = TFBertLMHeadModel(config=config) # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs.past_key_values # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and attn_mask next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) output_from_no_past = model(next_input_ids, output_hidden_states=True).hidden_states[0] output_from_past = model( next_tokens, past_key_values=past_key_values, output_hidden_states=True ).hidden_states[0] # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6) def create_and_check_causal_lm_model_past_with_attn_mask( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.is_decoder = True model = TFBertLMHeadModel(config=config) # create attention mask half_seq_length = self.seq_length // 2 attn_mask_begin = tf.ones((self.batch_size, half_seq_length), dtype=tf.int32) attn_mask_end = tf.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=tf.int32) attn_mask = tf.concat([attn_mask_begin, attn_mask_end], axis=1) # first forward pass outputs = model(input_ids, attention_mask=attn_mask, use_cache=True) # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) past_key_values = outputs.past_key_values # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).numpy() + 1 random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size) vector_condition = tf.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change) condition = tf.transpose( tf.broadcast_to(tf.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size)) ) input_ids = tf.where(condition, random_other_next_tokens, input_ids) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) attn_mask = tf.concat( [attn_mask, tf.ones((attn_mask.shape[0], 1), dtype=tf.int32)], axis=1, ) output_from_no_past = model( next_input_ids, attention_mask=attn_mask, 
output_hidden_states=True, ).hidden_states[0] output_from_past = model( next_tokens, past_key_values=past_key_values, attention_mask=attn_mask, output_hidden_states=True ).hidden_states[0] # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx] output_from_past_slice = output_from_past[:, 0, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6) def create_and_check_causal_lm_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.is_decoder = True model = TFBertLMHeadModel(config=config) input_ids = input_ids[:1, :] input_mask = input_mask[:1, :] self.batch_size = 1 # first forward pass outputs = model(input_ids, attention_mask=input_mask, use_cache=True) past_key_values = outputs.past_key_values # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True, ).hidden_states[0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, ).hidden_states[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = TFBertLMHeadModel(config=config) input_ids = input_ids[:1, :] input_mask = input_mask[:1, :] encoder_hidden_states = encoder_hidden_states[:1, :, :] encoder_attention_mask = encoder_attention_mask[:1, :] self.batch_size = 1 # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, ).hidden_states[0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, ).hidden_states[0] 
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFBertForMaskedLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_next_sequence_prediction( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFBertForNextSentencePrediction(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, 2)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFBertForPreTraining(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFBertForSequenceClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TFBertForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFBertForTokenClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = 
TFBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFBertModelTest(TFModelTesterMixin, TFCoreModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFBertModel,
            TFBertForMaskedLM,
            TFBertLMHeadModel,
            TFBertForNextSentencePrediction,
            TFBertForPreTraining,
            TFBertForQuestionAnswering,
            TFBertForSequenceClassification,
            TFBertForTokenClassification,
            TFBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFBertModel,
            "fill-mask": TFBertForMaskedLM,
            "question-answering": TFBertForQuestionAnswering,
            "text-classification": TFBertForSequenceClassification,
            "text-generation": TFBertLMHeadModel,
            "token-classification": TFBertForTokenClassification,
            "zero-shot": TFBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict

    def setUp(self):
        self.model_tester = TFBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        """Test the base model"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_causal_lm_base_model(self):
        """Test the base model of the causal LM model

        is_decoder=True, no cross_attention, no encoder outputs
        """
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_causal_lm_base_model(*config_and_inputs)

    def test_model_as_decoder(self):
        """Test the base model as a decoder (of an encoder-decoder architecture)

        is_decoder=True + cross_attention + pass encoder outputs
        """
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        """Test the causal LM model"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_causal_lm_model(*config_and_inputs)

    def test_causal_lm_model_as_decoder(self):
        """Test the causal LM model as a decoder"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_causal_lm_model_as_decoder(*config_and_inputs)

    def test_causal_lm_model_past(self):
"""Test causal LM model with `past_key_values`""" config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model_past(*config_and_inputs) def test_causal_lm_model_past_with_attn_mask(self): """Test the causal LM model with `past_key_values` and `attention_mask`""" config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model_past_with_attn_mask(*config_and_inputs) def test_causal_lm_model_past_with_large_inputs(self): """Test the causal LM model with `past_key_values` and a longer decoder sequence length""" config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model_past_large_inputs(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): """Similar to `test_causal_lm_model_past_with_large_inputs` but with cross-attention""" config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_next_sequence_prediction(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_model_from_pretrained(self): model = TFBertModel.from_pretrained("jplu/tiny-tf-bert-random") self.assertIsNotNone(model) def test_custom_load_tf_weights(self): model, output_loading_info = TFBertForTokenClassification.from_pretrained( "jplu/tiny-tf-bert-random", output_loading_info=True ) self.assertEqual(sorted(output_loading_info["unexpected_keys"]), []) for layer in output_loading_info["missing_keys"]: self.assertTrue(layer.split("_")[0] in ["dropout", "classifier"]) # TODO (Joao): fix me @unittest.skip("Onnx compliancy broke with TF 2.10") def test_onnx_compliancy(self): pass @require_tf class TFBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = TFBertForPreTraining.from_pretrained("lysandre/tiny-bert-random") input_ids = tf.constant([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] expected_shape = [1, 6, 32000] self.assertEqual(output.shape, expected_shape) print(output[:, :3, :3]) expected_slice = tf.constant( [ [ [-0.05243197, -0.04498899, 0.05512108], [-0.07444685, -0.01064632, 0.04352357], [-0.05020351, 0.05530146, 0.00700043], ] ] ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
transformers/tests/models/bert/test_modeling_tf_bert.py/0
{ "file_path": "transformers/tests/models/bert/test_modeling_tf_bert.py", "repo_id": "transformers", "token_count": 13599 }
418
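The `past_key_values` checks above all encode one invariant: decoding a single new token against the cached keys/values must reproduce the hidden states of a full forward pass over the concatenated sequence. Below is a minimal standalone sketch of that invariant, reusing the tester's tiny config values; it is illustrative only, not an additional test in the suite, and assumes `transformers` and `tensorflow` are installed.

import tensorflow as tf
from transformers import BertConfig, TFBertLMHeadModel

config = BertConfig(
    vocab_size=99,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=37,
    is_decoder=True,  # caching requires decoder mode
)
model = TFBertLMHeadModel(config)

input_ids = tf.constant([[1, 2, 3, 4, 5, 6, 7]])
outputs = model(input_ids, use_cache=True)  # first pass fills the cache

next_token = tf.constant([[8]])
full_ids = tf.concat([input_ids, next_token], axis=-1)

# Hidden state of the new position, computed without and with the cache.
no_past = model(full_ids, output_hidden_states=True).hidden_states[0][:, -1]
with_past = model(
    next_token, past_key_values=outputs.past_key_values, output_hidden_states=True
).hidden_states[0][:, 0]

tf.debugging.assert_near(no_past, with_past, rtol=1e-6)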
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_vision_available(): from transformers import BlipImageProcessor class BlipImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, do_pad=False, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True, ): super().__init__() size = size if size is not None else {"height": 20, "width": 20} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_pad = do_pad self.do_convert_rgb = do_convert_rgb def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, "do_pad": self.do_pad, } def expected_output_image_shape(self, images): return self.num_channels, self.size["height"], self.size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class BlipImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = BlipImageProcessor if is_vision_available() else None def setUp(self): super().setUp() self.image_processor_tester = BlipImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processor = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processor, "do_resize")) self.assertTrue(hasattr(image_processor, "size")) self.assertTrue(hasattr(image_processor, "do_normalize")) self.assertTrue(hasattr(image_processor, "image_mean")) self.assertTrue(hasattr(image_processor, "image_std")) self.assertTrue(hasattr(image_processor, "do_convert_rgb")) @require_torch @require_vision class BlipImageProcessingTestFourChannels(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = BlipImageProcessor if is_vision_available() else None def setUp(self): super().setUp() self.image_processor_tester = BlipImageProcessingTester(self, num_channels=4) 
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    @unittest.skip(reason="BlipImageProcessor does not support 4 channels yet")  # FIXME Amy
    def test_call_numpy(self):
        return super().test_call_numpy()

    @unittest.skip(reason="BlipImageProcessor does not support 4 channels yet")  # FIXME Amy
    def test_call_pytorch(self):
        # The original called `super().test_call_torch()`, which does not exist on the
        # mixin; the intended delegate is `test_call_pytorch`.
        return super().test_call_pytorch()

    @unittest.skip(reason="BLIP doesn't treat 4 channel PIL and numpy consistently yet")  # FIXME Amy
    def test_call_pil(self):
        pass

    @unittest.skip(reason="BLIP doesn't treat 4 channel PIL and numpy consistently yet")  # FIXME Amy
    def test_call_numpy_4_channels(self):
        pass
transformers/tests/models/blip/test_image_processing_blip.py/0
{ "file_path": "transformers/tests/models/blip/test_image_processing_blip.py", "repo_id": "transformers", "token_count": 2203 }
419
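For context on `expected_output_image_shape` above: `BlipImageProcessor` resizes every input to the configured `size` and returns channel-first arrays, so batched outputs are `(batch, channels, height, width)`. A short usage sketch built from the same processor settings the tester produces; the 30x40 input is an arbitrary stand-in, and the snippet assumes `transformers` is installed with its vision extras.

import numpy as np
from transformers import BlipImageProcessor

processor = BlipImageProcessor(
    do_resize=True,
    size={"height": 20, "width": 20},
    do_normalize=True,
    image_mean=[0.48145466, 0.4578275, 0.40821073],
    image_std=[0.26862954, 0.26130258, 0.27577711],
    do_convert_rgb=True,
)

image = (np.random.rand(30, 40, 3) * 255).astype(np.uint8)  # arbitrary HxWxC input
pixel_values = processor(images=image, return_tensors="np").pixel_values

# One image in, one channel-first 20x20 image out, as the tester's
# expected_output_image_shape asserts.
assert pixel_values.shape == (1, 3, 20, 20)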
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Chinese-CLIP model.""" import inspect import os import tempfile import unittest import numpy as np import requests from transformers import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, ChineseCLIPModel, ChineseCLIPTextModel, ChineseCLIPVisionModel, ) if is_vision_available(): from PIL import Image from transformers import ChineseCLIPProcessor class ChineseCLIPTextModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], 
self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): """ Returns a tiny configuration by default. """ return ChineseCLIPTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = ChineseCLIPTextModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = ChineseCLIPTextModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict class ChineseCLIPVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = 
is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return ChineseCLIPVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = ChineseCLIPVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class ChineseCLIPTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (ChineseCLIPTextModel,) if is_torch_available() else () fx_compatible = False # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["next_sentence_label"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = ChineseCLIPTextModelTester(self) self.config_tester = ConfigTester(self, config_class=ChineseCLIPTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() 
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    @slow
    def test_model_from_pretrained(self):
        model_name = "OFA-Sys/chinese-clip-vit-base-patch16"
        model = ChineseCLIPTextModel.from_pretrained(model_name)
        self.assertIsNotNone(model)

    @unittest.skip
    def test_training(self):
        pass

    @unittest.skip
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture does not seem to compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture does not seem to compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="ChineseCLIPTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="ChineseCLIPTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass


@require_torch
class ChineseCLIPVisionModelTest(ModelTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as CHINESE_CLIP does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
""" all_model_classes = (ChineseCLIPVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = ChineseCLIPVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=ChineseCLIPVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="CHINESE_CLIP does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip def test_training(self): pass @unittest.skip def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="ChineseCLIPVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="ChineseCLIPVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): model_name = "OFA-Sys/chinese-clip-vit-base-patch16" model = ChineseCLIPVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class ChineseCLIPModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = ChineseCLIPTextModelTester(parent, **text_kwargs) self.vision_model_tester = ChineseCLIPVisionModelTester(parent, **vision_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.is_training = is_training def prepare_config_and_inputs(self): ( config, input_ids, token_type_ids, attention_mask, _, __, ___, ) = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, token_type_ids, attention_mask, pixel_values def get_config(self): return ChineseCLIPConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def 
create_and_check_model(self, config, input_ids, token_type_ids, attention_mask, pixel_values): model = ChineseCLIPModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask, token_type_ids) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, token_type_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "return_loss": True, } return config, inputs_dict @require_torch class ChineseCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (ChineseCLIPModel,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": ChineseCLIPModel} if is_torch_available() else {} fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def setUp(self): text_kwargs = {"use_labels": False, "batch_size": 12} vision_kwargs = {"batch_size": 12} self.model_tester = ChineseCLIPModelTester(self, text_kwargs, vision_kwargs) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="ChineseCLIPModel does not have input/output embeddings") def test_model_get_set_embeddings(self): pass # override as the `logit_scale` parameter initilization is different for CHINESE_CLIP def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for sub_config_key in ("vision_config", "text_config"): sub_config = getattr(configs_no_init, sub_config_key, {}) setattr(configs_no_init, sub_config_key, _config_zero_init(sub_config)) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initilized as per the original implementation if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to False") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] 
pixel_values = inputs_dict["pixel_values"] # CHINESE_CLIP needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) @slow def test_model_from_pretrained(self): model_name = "OFA-Sys/chinese-clip-vit-base-patch16" model = ChineseCLIPModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of Pikachu def prepare_img(): url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_torch class ChineseCLIPModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "OFA-Sys/chinese-clip-vit-base-patch16" model = ChineseCLIPModel.from_pretrained(model_name).to(torch_device) processor = ChineseCLIPProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], images=image, padding=True, return_tensors="pt" ).to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits self.assertEqual( outputs.logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) probs = outputs.logits_per_image.softmax(dim=1) expected_probs = torch.tensor([[1.2686e-03, 5.4499e-02, 6.7968e-04, 9.4355e-01]], device=torch_device) self.assertTrue(torch.allclose(probs, expected_probs, atol=5e-3))
transformers/tests/models/chinese_clip/test_modeling_chinese_clip.py/0
{ "file_path": "transformers/tests/models/chinese_clip/test_modeling_chinese_clip.py", "repo_id": "transformers", "token_count": 12682 }
420
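The `test_initialization` override above pins `logit_scale` to `np.log(1 / 0.07)`, the CLIP convention: the learnable temperature is stored in log space, so similarities start scaled by `1 / 0.07` (about 14.3). Below is a numpy-only sketch of how that scale produces the `logits_per_image` / `logits_per_text` shapes asserted in `create_and_check_model`; the embeddings are random stand-ins, not model outputs.

import numpy as np

logit_scale_init = np.log(1 / 0.07)  # ~2.6593, the value the test asserts

# L2-normalized image and text embeddings, shapes (image_batch, dim) and (text_batch, dim).
image_embeds = np.random.randn(2, 64)
text_embeds = np.random.randn(4, 64)
image_embeds /= np.linalg.norm(image_embeds, axis=-1, keepdims=True)
text_embeds /= np.linalg.norm(text_embeds, axis=-1, keepdims=True)

# logits_per_image has shape (image_batch, text_batch); logits_per_text is its transpose.
logits_per_image = np.exp(logit_scale_init) * image_embeds @ text_embeds.T
logits_per_text = logits_per_image.T

assert logits_per_image.shape == (2, 4)
assert logits_per_text.shape == (4, 2)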
# coding=utf-8 # Copyright 2018 Salesforce and HuggingFace Inc. team. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import backend_empty_cache, require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class CTRLModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_input_mask = use_input_mask self.use_labels = use_labels self.use_mc_token_ids = use_mc_token_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.pad_token_id = self.vocab_size - 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) mc_token_ids = None if self.use_mc_token_ids: mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, input_mask, head_mask, token_type_ids, 
mc_token_ids, sequence_labels, token_labels, choice_labels, ) def get_config(self): return CTRLConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, dff=self.intermediate_size, # hidden_act=self.hidden_act, # hidden_dropout_prob=self.hidden_dropout_prob, # attention_probs_dropout_prob=self.attention_probs_dropout_prob, n_positions=self.max_position_embeddings, # type_vocab_size=self.type_vocab_size, # initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, ) def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = CTRLModel(config=config) model.to(torch_device) model.eval() model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask) model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(len(result.past_key_values), config.n_layer) def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = CTRLLMHeadModel(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask} return config, inputs_dict def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args): config.num_labels = self.num_labels model = CTRLForSequenceClassification(config) model.to(torch_device) model.eval() sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) @require_torch class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": CTRLModel, "text-classification": CTRLForSequenceClassification, "text-generation": CTRLLMHeadModel, "zero-shot": CTRLForSequenceClassification, } if is_torch_available() else {} ) test_pruning = True test_resize_embeddings = False test_head_masking = False # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. 
return True return False def setUp(self): self.model_tester = CTRLModelTester(self) self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37) def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() backend_empty_cache(torch_device) def test_config(self): self.config_tester.run_common_tests() def test_ctrl_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*config_and_inputs) def test_ctrl_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "Salesforce/ctrl" model = CTRLModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class CTRLModelLanguageGenerationTest(unittest.TestCase): def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() backend_empty_cache(torch_device) @slow def test_lm_generate_ctrl(self): model = CTRLLMHeadModel.from_pretrained("Salesforce/ctrl") model.to(torch_device) input_ids = torch.tensor( [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device ) # Legal the president is expected_output_ids = [ 11859, 0, 1611, 8, 5, 150, 26449, 2, 19, 348, 469, 3, 2595, 48, 20740, 246533, 246533, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a output_ids = model.generate(input_ids, do_sample=False) self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
transformers/tests/models/ctrl/test_modeling_ctrl.py/0
{ "file_path": "transformers/tests/models/ctrl/test_modeling_ctrl.py", "repo_id": "transformers", "token_count": 4756 }
421
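`test_lm_generate_ctrl` above pins greedy decoding to a frozen list of token ids. Outside the test harness, the same check reduces to the sketch below. It reuses the `Salesforce/ctrl` checkpoint named in the test (running it downloads the full model), and the prompt's leading word, `Legal`, is one of CTRL's control codes; with `do_sample=False` the continuation is deterministic, which is what makes a frozen-id regression test possible.

import torch
from transformers import CTRLLMHeadModel, CTRLTokenizer

tokenizer = CTRLTokenizer.from_pretrained("Salesforce/ctrl")
model = CTRLLMHeadModel.from_pretrained("Salesforce/ctrl")

input_ids = tokenizer("Legal the president is", return_tensors="pt").input_ids

with torch.no_grad():
    output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding

print(tokenizer.decode(output_ids[0]))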
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the TensorFlow DeiT model.""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.modeling_tf_utils import keras if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class TFDeiTModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2, attn_implementation="eager", ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.encoder_stride = encoder_stride self.attn_implementation = attn_implementation # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 2 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return DeiTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, 
is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, attn_implementation=self.attn_implementation, ) def create_and_check_model(self, config, pixel_values, labels): model = TFDeiTModel(config=config) result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels): model = TFDeiTForMaskedImageModeling(config=config) result = model(pixel_values) self.parent.assertEqual( result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images config.num_channels = 1 model = TFDeiTForMaskedImageModeling(config) pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = TFDeiTForImageClassification(config) result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) # test greyscale images config.num_channels = 1 model = TFDeiTForImageClassification(config) pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_tf_common.py, as DeiT does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFDeiTModel, "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFDeiTModelTester(self) self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (keras.layers.Layer)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, keras.layers.Dense)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_image_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) # special case for DeiTForImageClassificationWithTeacher model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters: del inputs_dict["labels"] return inputs_dict @slow def test_model_from_pretrained(self): model_name = "facebook/deit-base-distilled-patch16-224" model = TFDeiTModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf @require_vision class DeiTModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224") if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") # forward pass outputs = model(**inputs) # verify the logits expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = 
tf.constant([-1.0266, 0.1912, -1.2861]) self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @slow def test_inference_interpolate_pos_encoding(self): model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224") image_processor = self.default_image_processor # image size is {"height": 480, "width": 640} image = prepare_img() image_processor.size = {"height": 480, "width": 640} # center crop set to False so image is not center cropped to 224x224 inputs = image_processor(images=image, return_tensors="tf", do_center_crop=False) # forward pass outputs = model(**inputs, interpolate_pos_encoding=True) # verify the logits expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape)
transformers/tests/models/deit/test_modeling_tf_deit.py/0
{ "file_path": "transformers/tests/models/deit/test_modeling_tf_deit.py", "repo_id": "transformers", "token_count": 4887 }
422
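A quick, standalone sketch (not part of the test file above) of the sequence-length arithmetic that TFDeiTModelTester relies on, using the tester defaults image_size=30 and patch_size=2:

# DeiT flattens the image into non-overlapping patches, then prepends two
# special tokens: [CLS] and the distillation token.
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2  # 15 * 15 = 225
seq_length = num_patches + 2                   # 225 + 2 = 227
assert seq_length == 227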
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors, Allegro.pl and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from transformers import HerbertTokenizer, HerbertTokenizerFast from transformers.models.herbert.tokenization_herbert import VOCAB_FILES_NAMES from transformers.testing_utils import get_tests_dir, require_sacremoses, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_sacremoses @require_tokenizers class HerbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "allegro/herbert-base-cased" tokenizer_class = HerbertTokenizer rust_tokenizer_class = HerbertTokenizerFast test_rust_tokenizer = True def setUp(self): super().setUp() # Use a simpler test file without japanese/chinese characters with open(f"{get_tests_dir()}/fixtures/sample_text_no_unicode.txt", encoding="utf-8") as f_data: self._data = f_data.read().replace("\n\n", "\n").strip() vocab = [ "<s>", "</s>", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", ",</w>", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens)) with open(self.merges_file, "w") as fp: fp.write("\n".join(merges)) def get_input_output_texts(self, tokenizer): input_text = "lower newer" output_text = "lower newer" return input_text, output_text def test_full_tokenizer(self): tokenizer = self.tokenizer_class(vocab_file=self.vocab_file, merges_file=self.merges_file) text = "lower" bpe_tokens = ["low", "er</w>"] tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + ["<unk>"] input_bpe_tokens = [16, 17, 23] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: self.skipTest(reason="test_rust_tokenizer is set to False") tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "lower,newer" tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) @slow def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("allegro/herbert-base-cased") text = tokenizer.encode("konstruowanie sekwencji", add_special_tokens=False) text_2 = 
tokenizer.encode("konstruowanie wielu sekwencji", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == [0] + text + [2] assert encoded_pair == [0] + text + [2] + text_2 + [2] @unittest.skip( "Test passes if run individually but not with the full tests (internal state of the tokenizer is modified). Will fix later" ) def test_training_new_tokenizer_with_special_tokens_change(self): pass @unittest.skip( "Test passes if run individually but not with the full tests (internal state of the tokenizer is modified). Will fix later" ) def test_training_new_tokenizer(self): pass
transformers/tests/models/herbert/test_tokenization_herbert.py/0
{ "file_path": "transformers/tests/models/herbert/test_tokenization_herbert.py", "repo_id": "transformers", "token_count": 2218 }
423
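A minimal sketch of why tokenize("lower") yields ["low", "er</w>"] in the Herbert test above, given the toy merges "l o", "lo w", and "e r</w>". This is a simplification: a real BPE tokenizer repeatedly merges the highest-priority adjacent pair, but for this toy vocabulary a single ordered pass gives the same result.

def toy_bpe(word, merges):
    # Split into characters and mark the end of the word, as the test vocab does.
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    for a, b in merges:
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == a and symbols[i + 1] == b:
                symbols[i : i + 2] = [a + b]  # merge the adjacent pair in place
            else:
                i += 1
    return symbols

print(toy_bpe("lower", [("l", "o"), ("lo", "w"), ("e", "r</w>")]))  # ['low', 'er</w>']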
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from io import BytesIO import requests from transformers import Idefics2Processor from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image @require_torch @require_vision class Idefics2ProcessorTest(unittest.TestCase): def setUp(self): self.processor = Idefics2Processor.from_pretrained("HuggingFaceM4/idefics2-8b", image_seq_len=2) self.image1 = Image.open( BytesIO( requests.get( "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" ).content ) ) self.image2 = Image.open( BytesIO(requests.get("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg").content) ) self.image3 = Image.open( BytesIO( requests.get( "https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg" ).content ) ) self.bos_token = self.processor.tokenizer.bos_token self.image_token = self.processor.image_token.content self.fake_image_token = self.processor.fake_image_token.content self.bos_token_id = self.processor.tokenizer.convert_tokens_to_ids(self.bos_token) self.image_token_id = self.processor.tokenizer.convert_tokens_to_ids(self.image_token) self.fake_image_token_id = self.processor.tokenizer.convert_tokens_to_ids(self.fake_image_token) self.image_seq_len = self.processor.image_seq_len def test_process_interleaved_images_prompts_no_image_splitting(self): old_image_splitting = self.processor.image_processor.do_image_splitting self.processor.image_processor.do_image_splitting = False # Test that a single image is processed correctly inputs = self.processor(images=self.image1) self.assertEqual(inputs["pixel_values"].shape, (1, 1, 3, 653, 980)) self.assertEqual(inputs["pixel_attention_mask"].shape, (1, 1, 653, 980)) # fmt: on # Test a single sample with image and text image_str = "<image>" text_str = "In this image, we see" text = image_str + text_str inputs = self.processor(text=text, images=self.image1) # fmt: off tokenized_sentence = self.processor.tokenizer(text_str, add_special_tokens=False) expected_input_ids = [[self.bos_token_id] + [self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len + [self.fake_image_token_id] + tokenized_sentence["input_ids"]] self.assertEqual(inputs["input_ids"], expected_input_ids) self.assertEqual(inputs["attention_mask"], [[1] * len(expected_input_ids[0])]) self.assertEqual(inputs["pixel_values"].shape, (1, 1, 3, 653, 980)) self.assertEqual(inputs["pixel_attention_mask"].shape, (1, 1, 653, 980)) # fmt: on # Test that batch is correctly processed image_str = "<image>" text_str_1 = "In this image, we see" text_str_2 = "bla, bla" text = [ image_str + text_str_1, text_str_2 + image_str + image_str, ] images = [[self.image1], [self.image2, self.image3]] inputs = self.processor(text=text, images=images, padding=True) # fmt: off tokenized_sentence_1 = 
self.processor.tokenizer(text_str_1, add_special_tokens=False) tokenized_sentence_2 = self.processor.tokenizer(text_str_2, add_special_tokens=False) expected_input_ids_1 = [self.bos_token_id] + [self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len + [self.fake_image_token_id] + tokenized_sentence_1["input_ids"] expected_input_ids_2 = [self.bos_token_id] + tokenized_sentence_2["input_ids"] + [self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len + [self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len + [self.fake_image_token_id] # Pad the first input to match the second input pad_len = len(expected_input_ids_2) - len(expected_input_ids_1) padded_expected_input_ids_1 = [0] * pad_len + expected_input_ids_1 self.assertEqual( inputs["input_ids"], [padded_expected_input_ids_1, expected_input_ids_2] ) self.assertEqual( inputs["attention_mask"], [[0] * pad_len + [1] * len(expected_input_ids_1), [1] * len(expected_input_ids_2)] ) self.assertEqual(inputs['pixel_values'].shape, (2, 2, 3, 767, 980)) self.assertEqual(inputs['pixel_attention_mask'].shape, (2, 2, 767, 980)) # fmt: on self.processor.image_processor.do_image_splitting = old_image_splitting def test_process_interleaved_images_prompts_image_splitting(self): old_image_splitting = self.processor.image_processor.do_image_splitting self.processor.image_processor.do_image_splitting = True # Test that a single image is processed correctly inputs = self.processor(images=self.image1) self.assertEqual(inputs["pixel_values"].shape, (1, 5, 3, 653, 980)) self.assertEqual(inputs["pixel_attention_mask"].shape, (1, 5, 653, 980)) # fmt: on # Test a single sample with image and text image_str = "<image>" text_str = "In this image, we see" text = image_str + text_str inputs = self.processor(text=text, images=self.image1) # fmt: off tokenized_sentence = self.processor.tokenizer(text_str, add_special_tokens=False) expected_input_ids = [[self.bos_token_id] + ([self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len) * 5 + [self.fake_image_token_id] + tokenized_sentence["input_ids"]] self.assertEqual(inputs["input_ids"], expected_input_ids) self.assertEqual(inputs["attention_mask"], [[1] * len(expected_input_ids[0])]) self.assertEqual(inputs["pixel_values"].shape, (1, 5, 3, 653, 980)) self.assertEqual(inputs["pixel_attention_mask"].shape, (1, 5, 653, 980)) # fmt: on # Test that batch is correctly processed image_str = "<image>" text_str_1 = "In this image, we see" text_str_2 = "bla, bla" text = [ image_str + text_str_1, text_str_2 + image_str + image_str, ] images = [[self.image1], [self.image2, self.image3]] inputs = self.processor(text=text, images=images, padding=True) # fmt: off tokenized_sentence_1 = self.processor.tokenizer(text_str_1, add_special_tokens=False) tokenized_sentence_2 = self.processor.tokenizer(text_str_2, add_special_tokens=False) expected_input_ids_1 = [self.bos_token_id] + ([self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len) * 5 + [self.fake_image_token_id] + tokenized_sentence_1["input_ids"] expected_input_ids_2 = [self.bos_token_id] + tokenized_sentence_2["input_ids"] + ([self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len) * 5 + ([self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len) * 5 + [self.fake_image_token_id] # Pad the first input to match the second input pad_len = len(expected_input_ids_2) - len(expected_input_ids_1) padded_expected_input_ids_1 = [0] * pad_len + expected_input_ids_1 self.assertEqual( 
inputs["input_ids"], [padded_expected_input_ids_1, expected_input_ids_2] ) self.assertEqual( inputs["attention_mask"], [[0] * pad_len + [1] * len(expected_input_ids_1), [1] * len(expected_input_ids_2)] ) self.assertEqual(inputs['pixel_values'].shape, (2, 10, 3, 767, 980)) self.assertEqual(inputs['pixel_attention_mask'].shape, (2, 10, 767, 980)) # fmt: on self.processor.image_processor.do_image_splitting = old_image_splitting def test_add_special_tokens_processor(self): image_str = "<image>" text_str = "In this image, we see" text = text_str + image_str n_image_repeat = 5 if self.processor.image_processor.do_image_splitting else 1 # fmt: off inputs = self.processor(text=text, images=self.image1, add_special_tokens=False) tokenized_sentence = self.processor.tokenizer(text_str, add_special_tokens=False) expected_input_ids = [tokenized_sentence["input_ids"] + ([self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len) * n_image_repeat + [self.fake_image_token_id]] self.assertEqual(inputs["input_ids"], expected_input_ids) inputs = self.processor(text=text, images=self.image1) expected_input_ids = [[self.bos_token_id] + tokenized_sentence["input_ids"] + ([self.fake_image_token_id] + [self.image_token_id] * self.image_seq_len) * n_image_repeat + [self.fake_image_token_id]] self.assertEqual(inputs["input_ids"], expected_input_ids) # fmt: on def test_apply_chat_template(self): # Message contains content which a mix of lists with images and image urls and string messages = [ { "role": "user", "content": [ {"type": "text", "text": "What do these images show?"}, {"type": "image"}, {"type": "image"}, "What do these images show?", ], }, { "role": "assistant", "content": [ { "type": "text", "text": "The first image shows the statue of Liberty in New York. The second image picture depicts Idefix, the dog of Obelix in Asterix and Obelix.", } ], }, {"role": "user", "content": [{"type": "text", "text": "And who is that?"}]}, ] processor = self.processor # Make short sequence length to test that the fake tokens are added correctly rendered = processor.apply_chat_template(messages, add_generation_prompt=True) expected_rendered = ( "User: What do these images show?<image><image><end_of_utterance>\n" "Assistant: The first image shows the statue of Liberty in New York. The second image picture depicts Idefix, the dog of Obelix in Asterix and Obelix.<end_of_utterance>\n" "User: And who is that?<end_of_utterance>\n" "Assistant:" ) self.assertEqual(rendered, expected_rendered)
transformers/tests/models/idefics2/test_processing_idefics2.py/0
{ "file_path": "transformers/tests/models/idefics2/test_processing_idefics2.py", "repo_id": "transformers", "token_count": 4942 }
424
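A standalone sketch of the input_ids layout the Idefics2 processor tests above assert: each image becomes the fake image token followed by image_seq_len image tokens, and with do_image_splitting enabled that pattern repeats five times per image (four crops plus the resized original). The ids below are placeholders for illustration, not the model's real vocabulary.

bos_id, fake_id, img_id = 1, 32000, 32001  # hypothetical token ids
image_seq_len = 2                          # mirrors image_seq_len=2 in setUp
n_sub_images = 5                           # 1 when do_image_splitting is False
expected = [bos_id] + ([fake_id] + [img_id] * image_seq_len) * n_sub_images + [fake_id]
assert len(expected) == 1 + n_sub_images * (1 + image_seq_len) + 1  # 17 tokens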
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch LLaMA model.""" import gc import tempfile import unittest import pytest from packaging import version from parameterized import parameterized from transformers import AutoTokenizer, LlamaConfig, StaticCache, is_torch_available, set_seed from transformers.testing_utils import ( require_bitsandbytes, require_flash_attn, require_read_token, require_torch, require_torch_gpu, require_torch_sdpa, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LlamaForCausalLM, LlamaForQuestionAnswering, LlamaForSequenceClassification, LlamaForTokenClassification, LlamaModel, LlamaTokenizer, ) from transformers.models.llama.modeling_llama import LlamaLinearScalingRotaryEmbedding, LlamaRotaryEmbedding class LlamaModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = torch.tril(torch.ones(self.batch_size, self.seq_length)).to(torch_device) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = 
ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return LlamaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LlamaModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = LlamaModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = LlamaForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = LlamaForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next tokens and extend to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_attention_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification, LlamaForQuestionAnswering, LlamaForTokenClassification, ) if is_torch_available() else () ) all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": LlamaModel, "text-classification": LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, "question-answering": LlamaForQuestionAnswering, "token-classification": LlamaForTokenClassification, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False fx_compatible = True # Need to use `0.8` instead of `0.9` for `test_cpu_offload` # This is because we are hitting edge cases with the causal_mask buffer model_split_percents = [0.5, 0.7, 0.8] # used in `test_torch_compile` _torch_compile_test_ckpt = "meta-llama/Llama-2-7b-hf" def setUp(self): self.model_tester = LlamaModelTester(self) self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_llama_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = LlamaForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_llama_sequence_classification_model_for_single_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "single_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) 
sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = LlamaForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_llama_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) model = LlamaForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_llama_token_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) token_labels = ids_tensor([self.model_tester.batch_size, self.model_tester.seq_length], config.num_labels) model = LlamaForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=token_labels) self.assertEqual( result.logits.shape, (self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.num_labels), ) @unittest.skip(reason="Llama buffers include complex numbers, which breaks this test") def test_save_load_fast_init_from_base(self): pass @parameterized.expand([("linear",), ("dynamic",), ("yarn",)]) def test_model_rope_scaling_from_config(self, scaling_type): config, _ = self.model_tester.prepare_config_and_inputs_for_common() short_input = ids_tensor([1, 10], config.vocab_size) long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights original_model = LlamaModel(config) original_model.to(torch_device) original_model.eval() original_short_output = original_model(short_input).last_hidden_state original_long_output = original_model(long_input).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights config.rope_scaling = {"type": scaling_type, "factor": 10.0} scaled_model = LlamaModel(config) scaled_model.to(torch_device) scaled_model.eval() scaled_short_output = scaled_model(short_input).last_hidden_state scaled_long_output = scaled_model(long_input).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) else: self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5)) def test_model_rope_scaling(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() scaling_factor = 10 short_input_length = 10 long_input_length = int(config.max_position_embeddings * 1.5) # Inputs x = torch.randn(1, dtype=torch.float32, device=torch_device) # used exlusively to get the dtype and the device position_ids_short = torch.arange(short_input_length, dtype=torch.long, device=torch_device) position_ids_short = position_ids_short.unsqueeze(0) position_ids_long = torch.arange(long_input_length, dtype=torch.long, device=torch_device) position_ids_long = position_ids_long.unsqueeze(0) # Sanity check original RoPE original_rope = LlamaRotaryEmbedding(config=config).to(torch_device) original_cos_short, original_sin_short = original_rope(x, position_ids_short) original_cos_long, original_sin_long = original_rope(x, position_ids_long) torch.testing.assert_close(original_cos_short, original_cos_long[:, :short_input_length, :]) torch.testing.assert_close(original_sin_short, original_sin_long[:, :short_input_length, :]) # Sanity check linear RoPE scaling # New position "x" should match original position with index "x/scaling_factor" config.rope_scaling = {"type": "linear", "factor": scaling_factor} linear_scaling_rope = LlamaRotaryEmbedding(config=config).to(torch_device) linear_cos_short, linear_sin_short = linear_scaling_rope(x, position_ids_short) linear_cos_long, linear_sin_long = linear_scaling_rope(x, position_ids_long) torch.testing.assert_close(linear_cos_short, linear_cos_long[:, :short_input_length, :]) torch.testing.assert_close(linear_sin_short, linear_sin_long[:, :short_input_length, :]) for new_position in range(0, long_input_length, scaling_factor): original_position = int(new_position // scaling_factor) torch.testing.assert_close(linear_cos_long[:, new_position, :], original_cos_long[:, original_position, :]) torch.testing.assert_close(linear_sin_long[:, new_position, :], original_sin_long[:, original_position, :]) # Sanity check Dynamic NTK RoPE scaling # Scaling should only be observed after a long input is fed. 
We can observe that the frequencies increase # with scaling_factor (or that `inv_freq` decreases) config.rope_scaling = {"type": "dynamic", "factor": scaling_factor} ntk_scaling_rope = LlamaRotaryEmbedding(config=config).to(torch_device) ntk_cos_short, ntk_sin_short = ntk_scaling_rope(x, position_ids_short) ntk_cos_long, ntk_sin_long = ntk_scaling_rope(x, position_ids_long) torch.testing.assert_close(ntk_cos_short, original_cos_short) torch.testing.assert_close(ntk_sin_short, original_sin_short) with self.assertRaises(AssertionError): torch.testing.assert_close(ntk_cos_long, original_cos_long) with self.assertRaises(AssertionError): torch.testing.assert_close(ntk_sin_long, original_sin_long) self.assertTrue((ntk_scaling_rope.inv_freq <= original_rope.inv_freq).all()) # Sanity check Yarn RoPE scaling # Scaling should be over the entire input config.rope_scaling = {"type": "yarn", "factor": scaling_factor} yarn_scaling_rope = LlamaRotaryEmbedding(config=config).to(torch_device) yarn_cos_short, yarn_sin_short = yarn_scaling_rope(x, position_ids_short) yarn_cos_long, yarn_sin_long = yarn_scaling_rope(x, position_ids_long) torch.testing.assert_close(yarn_cos_short, yarn_cos_long[:, :short_input_length, :]) torch.testing.assert_close(yarn_sin_short, yarn_sin_long[:, :short_input_length, :]) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_cos_short, original_cos_short) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_sin_short, original_sin_short) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_cos_long, original_cos_long) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_sin_long, original_sin_long) def test_rope_class_retrocompatibility(self): # Delete me when we remove compatibility for the old API :) config, _ = self.model_tester.prepare_config_and_inputs_for_common() scaling_factor = 10 short_input_length = 10 long_input_length = int(config.max_position_embeddings * 1.5) config.rope_scaling = {"type": "linear", "factor": 10} # Inputs x = torch.randn(1, dtype=torch.float32, device=torch_device) # used exclusively to get the dtype and the device position_ids_short = torch.arange(short_input_length, dtype=torch.long, device=torch_device) position_ids_short = position_ids_short.unsqueeze(0) position_ids_long = torch.arange(long_input_length, dtype=torch.long, device=torch_device) position_ids_long = position_ids_long.unsqueeze(0) # Old API -- under the hood, "type": "linear" is set and `LlamaRotaryEmbedding` is called old_api_rope = LlamaLinearScalingRotaryEmbedding( config.hidden_size // config.num_attention_heads, max_position_embeddings=config.max_position_embeddings, base=config.rope_theta, scaling_factor=scaling_factor, ).to(torch_device) old_cos_short, old_sin_short = old_api_rope(x, position_ids_short) old_cos_long, old_sin_long = old_api_rope(x, position_ids_long) # New API config.rope_scaling = {"type": "linear", "factor": scaling_factor} new_api_rope = LlamaRotaryEmbedding(config=config).to(torch_device) new_cos_short, new_sin_short = new_api_rope(x, position_ids_short) new_cos_long, new_sin_long = new_api_rope(x, position_ids_long) # The results should match torch.testing.assert_close(old_cos_short, new_cos_short) torch.testing.assert_close(old_sin_short, new_sin_short) torch.testing.assert_close(old_cos_long, new_cos_long) torch.testing.assert_close(old_sin_long, new_sin_long) def test_model_loading_old_rope_configs(self): def _reinitialize_config(base_config, new_kwargs): # Reinitialize the
config with the new kwargs, forcing the config to go through its __init__ validation # steps. base_config_dict = base_config.to_dict() new_config = LlamaConfig.from_dict(config_dict={**base_config_dict, **new_kwargs}) return new_config # from untouched config -> ✅ base_config, model_inputs = self.model_tester.prepare_config_and_inputs_for_common() original_model = LlamaForCausalLM(base_config).to(torch_device) original_model(**model_inputs) # from a config with the expected rope configuration -> ✅ config = _reinitialize_config(base_config, {"rope_scaling": {"rope_type": "linear", "factor": 10.0}}) original_model = LlamaForCausalLM(config).to(torch_device) original_model(**model_inputs) # from a config with the old rope configuration ('type' instead of 'rope_type') -> ✅ we gracefully handle BC config = _reinitialize_config(base_config, {"rope_scaling": {"type": "linear", "factor": 10.0}}) original_model = LlamaForCausalLM(config).to(torch_device) original_model(**model_inputs) # from a config with both 'type' and 'rope_type' -> ✅ they can coexist (and both are present in the config) config = _reinitialize_config( base_config, {"rope_scaling": {"type": "linear", "rope_type": "linear", "factor": 10.0}} ) self.assertTrue(config.rope_scaling["type"] == "linear") self.assertTrue(config.rope_scaling["rope_type"] == "linear") original_model = LlamaForCausalLM(config).to(torch_device) original_model(**model_inputs) # from a config with parameters in a bad range ('factor' should be >= 1.0) -> ⚠️ throws a warning with self.assertLogs("transformers.modeling_rope_utils", level="WARNING") as logs: config = _reinitialize_config(base_config, {"rope_scaling": {"rope_type": "linear", "factor": -999.0}}) original_model = LlamaForCausalLM(config).to(torch_device) original_model(**model_inputs) self.assertEqual(len(logs.output), 1) self.assertIn("factor field", logs.output[0]) # from a config with unknown parameters ('foo' isn't a rope option) -> ⚠️ throws a warning with self.assertLogs("transformers.modeling_rope_utils", level="WARNING") as logs: config = _reinitialize_config( base_config, {"rope_scaling": {"rope_type": "linear", "factor": 10.0, "foo": "bar"}} ) original_model = LlamaForCausalLM(config).to(torch_device) original_model(**model_inputs) self.assertEqual(len(logs.output), 1) self.assertIn("Unrecognized keys", logs.output[0]) # from a config with specific rope type but missing one of its mandatory parameters -> ❌ throws exception with self.assertRaises(KeyError): config = _reinitialize_config(base_config, {"rope_scaling": {"rope_type": "linear"}}) # missing "factor" @require_flash_attn @require_torch_gpu @require_bitsandbytes @pytest.mark.flash_attn_test @require_read_token @slow def test_flash_attn_2_generate_padding_right(self): """ Overwriting the common test as the test is flaky on tiny models """ model = LlamaForCausalLM.from_pretrained( "meta-llama/Llama-2-7b-hf", load_in_4bit=True, device_map={"": 0}, ) tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf") texts = ["hi", "Hello this is a very long sentence"] tokenizer.padding_side = "right" tokenizer.pad_token = tokenizer.eos_token inputs = tokenizer(texts, return_tensors="pt", padding=True).to(0) output_native = model.generate(**inputs, max_new_tokens=20, do_sample=False) output_native = tokenizer.batch_decode(output_native) model = LlamaForCausalLM.from_pretrained( "meta-llama/Llama-2-7b-hf", load_in_4bit=True, device_map={"": 0}, attn_implementation="flash_attention_2" ) output_fa_2 = model.generate(**inputs,
max_new_tokens=20, do_sample=False) output_fa_2 = tokenizer.batch_decode(output_fa_2) self.assertListEqual(output_native, output_fa_2) @require_flash_attn @require_torch_gpu @slow @pytest.mark.flash_attn_test def test_use_flash_attention_2_true(self): """ NOTE: this is the only test checking that the legacy `use_flash_attention_2=True` argument still works as intended. """ config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with tempfile.TemporaryDirectory() as tmp_dir: model = model_class(config) model.save_pretrained(tmp_dir) new_model = LlamaForCausalLM.from_pretrained( tmp_dir, use_flash_attention_2=True, torch_dtype=torch.float16 ).to("cuda") self.assertTrue(new_model.config._attn_implementation == "flash_attention_2") has_flash = False for name, submodule in new_model.named_modules(): if "FlashAttention" in submodule.__class__.__name__: has_flash = True break if not has_flash: raise ValueError("The flash model should have flash attention layers") @require_torch_sdpa @slow def test_eager_matches_sdpa_generate(self): """ Overwriting the common test as the test is flaky on tiny models """ max_new_tokens = 30 tokenizer = LlamaTokenizer.from_pretrained("saibo/llama-1B") model_sdpa = LlamaForCausalLM.from_pretrained( "saibo/llama-1B", torch_dtype=torch.float16, low_cpu_mem_usage=True, ).to(torch_device) self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") model_eager = LlamaForCausalLM.from_pretrained( "saibo/llama-1B", torch_dtype=torch.float16, low_cpu_mem_usage=True, attn_implementation="eager", ).to(torch_device) self.assertTrue(model_eager.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): if "SdpaAttention" in submodule.__class__.__name__: raise ValueError("The eager model should not have SDPA attention layers") has_sdpa = False for name, submodule in model_sdpa.named_modules(): if "SdpaAttention" in submodule.__class__.__name__: has_sdpa = True break if not has_sdpa: raise ValueError("The SDPA model should have SDPA attention layers") texts = [ "hi here's a longer context, getting longer and", "Hello this is a very long sentence my friend, very long for real", "Today I am in Paris and", ] for padding_side in ["left", "right"]: tokenizer.padding_side = padding_side tokenizer.pad_token = tokenizer.eos_token inputs = tokenizer(texts, return_tensors="pt", padding=True).to(torch_device) res_eager = model_eager.generate(**inputs, max_new_tokens=max_new_tokens, do_sample=False) res_sdpa = model_sdpa.generate(**inputs, max_new_tokens=max_new_tokens, do_sample=False) with self.subTest(f"{padding_side}"): torch.testing.assert_close( res_eager, res_sdpa, msg=f"\n{tokenizer.batch_decode(res_eager)} \nvs\n{tokenizer.batch_decode(res_sdpa)}", ) @require_torch_gpu class LlamaIntegrationTest(unittest.TestCase): # This variable is used to determine which CUDA device we are using for our runners (A10 or T4) # Depending on the hardware we get different logits / generations cuda_compute_capability_major_version = None @classmethod def setUpClass(cls): if is_torch_available() and torch.cuda.is_available(): # 8 is for A100 / A10 and 7 for T4 cls.cuda_compute_capability_major_version = torch.cuda.get_device_capability()[0] @slow @require_read_token def test_llama_3_1_hard(self): """ An integration test for Llama 3.1.
It tests against a long output to ensure the subtle numerical differences from Llama 3.1's RoPE can be detected """ # diff on `EXPECTED_TEXT`: # 2024-08-26: updating from torch 2.3.1 to 2.4.0 slightly changes the results. EXPECTED_TEXT = ( "Tell me about the french revolution. The french revolution was a period of radical political and social " "upheaval in France that lasted from 1789 until 1799. It was a time of great change and upheaval, marked " "by the overthrow of the monarchy, the rise of the middle class, and the eventual establishment of the " "First French Republic.\nThe revolution began in 1789 with the Estates-General, a representative " "assembly that had not met since 1614. The Third Estate, which represented the common people, " "demanded greater representation and eventually broke away to form the National Assembly. This marked " "the beginning of the end of the absolute monarchy and the rise of the middle class.\n" ) tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct") model = LlamaForCausalLM.from_pretrained( "meta-llama/Meta-Llama-3.1-8B-Instruct", device_map="auto", torch_dtype=torch.bfloat16 ) input_text = ["Tell me about the french revolution."] model_inputs = tokenizer(input_text, return_tensors="pt").to(model.device) generated_ids = model.generate(**model_inputs, max_new_tokens=128, do_sample=False) generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(generated_text, EXPECTED_TEXT) @slow @require_read_token def test_model_7b_logits_bf16(self): input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338] model = LlamaForCausalLM.from_pretrained( "meta-llama/Llama-2-7b-hf", device_map="auto", torch_dtype=torch.bfloat16, attn_implementation="eager" ) with torch.no_grad(): out = model(torch.tensor([input_ids]).to(torch_device)) # Expected mean on dim = -1 # fmt: off EXPECTED_MEAN = { 7: torch.tensor([[-6.5061, -4.1147, -4.9669, -3.2038, 0.8069, -2.9694, 1.2864, -3.3786]]), 8: torch.tensor([[-6.5208, -4.1218, -4.9377, -3.2536, 0.8127, -2.9811, 1.2918, -3.3848]]) } self.assertTrue(torch.allclose(EXPECTED_MEAN[self.cuda_compute_capability_major_version].to(torch_device), out.logits.mean(-1), atol=1e-2, rtol=1e-2)) # slicing logits[0, 0, 0:15] EXPECTED_SLICE = { 7: torch.tensor([[-12.5000, -7.0625, -0.6289, -7.8750, -6.9688, -7.8125, -6.4688, -7.4375, -7.6875, -6.9375, -6.0312, -7.0000, -1.8594, 1.8438, -8.5000]]), 8: torch.tensor([[-12.5625, -7.1250, -0.6289, -7.8750, -6.9688, -7.8125, -6.5000, -7.4375, -7.6562, -6.9688, -6.0312, -7.0312, -1.8203, 1.8750, -8.5000]]) } # fmt: on self.assertTrue( torch.allclose( EXPECTED_SLICE[self.cuda_compute_capability_major_version].to(torch_device), out.logits[0, 0, :15], atol=1e-2, rtol=1e-2, ) ) @slow @require_read_token def test_model_7b_logits(self): input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338] model = LlamaForCausalLM.from_pretrained( "meta-llama/Llama-2-7b-hf", device_map="auto", torch_dtype=torch.float16 ) with torch.no_grad(): out = model(torch.tensor([input_ids]).to(torch_device)) # fmt: off # Expected mean on dim = -1 EXPECTED_MEAN = { 7: torch.tensor([[-6.6420, -4.1227, -4.9809, -3.2041, 0.8261, -3.0052, 1.2957, -3.3648]]), 8: torch.tensor([[-6.6544, -4.1259, -4.9840, -3.2456, 0.8261, -3.0124, 1.2971, -3.3641]]) } self.assertTrue(torch.allclose(EXPECTED_MEAN[self.cuda_compute_capability_major_version].to(torch_device), out.logits.mean(-1), atol=1e-2, rtol=1e-2)) # slicing logits[0, 0, 0:15] EXPECTED_SLICE = { 7: torch.tensor([-12.8125, -7.3359,
-0.4846, -8.0234, -7.2383, -7.9922, -6.4805, -7.7344, -7.8125, -7.0078, -6.1797, -7.1094, -1.8633, 1.9736, -8.6016]), 8: torch.tensor([-12.8281, -7.4609, -0.4668, -8.0703, -7.2539, -8.0078, -6.4961, -7.7734, -7.8516, -7.0352, -6.2188, -7.1367, -1.8564, 1.9922, -8.6328]) } # fmt: on self.assertTrue( torch.allclose( EXPECTED_SLICE[self.cuda_compute_capability_major_version].to(torch_device), out.logits[0, 0, :15], atol=1e-2, rtol=1e-2, ) ) @slow def test_model_7b_dola_generation(self): # ground truth text generated with dola_layers="low", repetition_penalty=1.2 EXPECTED_TEXT_COMPLETION = ( "Simply put, the theory of relativity states that 1) time and space are relative, and 2) the laws of " "physics are the same for all observers in uniform motion relative to one another.\n\nThe theory of " "relativity was developed by Albert Einstein in the early 20th century, and it revolutionized our " "understanding of space and time." ) prompt = "Simply put, the theory of relativity states that " tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf") model = LlamaForCausalLM.from_pretrained( "meta-llama/Llama-2-7b-chat-hf", device_map="sequential", torch_dtype=torch.float16 ) model_inputs = tokenizer(prompt, return_tensors="pt").to(model.device) # greedy generation outputs generated_ids = model.generate( **model_inputs, max_new_tokens=64, top_p=None, temperature=1, do_sample=False, dola_layers="low" ) text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) @slow @require_torch_gpu @require_read_token def test_compile_static_cache(self): # `torch==2.2` will throw an error on this test (as in other compilation tests), but torch==2.1.2 and torch>2.2 # work as intended. See https://github.com/pytorch/pytorch/issues/121943 if version.parse(torch.__version__) < version.parse("2.3.0"): self.skipTest(reason="This test requires torch >= 2.3 to run.") NUM_TOKENS_TO_GENERATE = 40 # Note on `EXPECTED_TEXT_COMPLETION`'s diff: the current value matches the original test if the original test # was changed to have a cache of 53 tokens (as opposed to 4096), on Ampere GPUs. EXPECTED_TEXT_COMPLETION = [ "Simply put, the theory of relativity states that 1) the speed of light is constant in all inertial " "reference frames, and 2) the laws of physics are the same for all inertial reference frames.\nThe " "theory of relativ", "My favorite all time favorite condiment is ketchup. I love it on everything. 
I love it on my eggs, " "my fries, my chicken, my burgers, my hot dogs, my sandwiches, my salads, my p", ] prompts = [ "Simply put, the theory of relativity states that ", "My favorite all time favorite condiment is ketchup.", ] tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", pad_token="</s>", padding_side="right") model = LlamaForCausalLM.from_pretrained( "meta-llama/Llama-2-7b-hf", device_map="sequential", torch_dtype=torch.float16 ) inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device) # Dynamic Cache generated_ids = model.generate(**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False) dynamic_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, dynamic_text) # Static Cache generated_ids = model.generate( **inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static" ) static_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, static_text) # Static Cache + compile model._cache = None # clear cache object, initialized when we pass `cache_implementation="static"` model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True) generated_ids = model.generate( **inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static" ) static_compiled_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, static_compiled_text) @slow @require_torch_gpu class Mask4DTestHard(unittest.TestCase): def tearDown(self): gc.collect() torch.cuda.empty_cache() def setUp(self): model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0" self.model_dtype = torch.float32 self.tokenizer = LlamaTokenizer.from_pretrained(model_name) self.model = LlamaForCausalLM.from_pretrained(model_name, torch_dtype=self.model_dtype).to(torch_device) def get_test_data(self): template = "my favorite {}" items = ("pet is a", "artist plays a", "name is L") # same number of tokens in each item batch_separate = [template.format(x) for x in items] # 3 separate lines batch_shared_prefix = template.format(" ".join(items)) # 1 line with options concatenated input_ids = self.tokenizer(batch_separate, return_tensors="pt").input_ids.to(torch_device) input_ids_shared_prefix = self.tokenizer(batch_shared_prefix, return_tensors="pt").input_ids.to(torch_device) mask_shared_prefix = torch.tensor( [ [ [ [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0], [1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1], ] ] ], device=torch_device, ) position_ids = torch.arange(input_ids.shape[1]).tile(input_ids.shape[0], 1).to(torch_device) # building custom positions ids based on custom mask position_ids_shared_prefix = (mask_shared_prefix.sum(dim=-1) - 1).reshape(1, -1) # effectively: position_ids_shared_prefix = torch.tensor([[0, 1, 2, 3, 4, 5, 3, 4, 5, 3, 4, 5]]).to(device) # inverting the mask min_dtype = torch.finfo(self.model_dtype).min mask_shared_prefix = (mask_shared_prefix.eq(0.0)).to(dtype=self.model_dtype) * min_dtype return input_ids, position_ids, input_ids_shared_prefix, mask_shared_prefix, 
position_ids_shared_prefix def test_stacked_causal_mask(self): ( input_ids, position_ids, input_ids_shared_prefix, mask_shared_prefix, position_ids_shared_prefix, ) = self.get_test_data() # regular batch logits = self.model.forward(input_ids, position_ids=position_ids).logits logits_last = logits[:, -1, :] # last tokens in each batch line decoded = [self.tokenizer.decode(t) for t in logits_last.argmax(dim=-1)] # single forward run with 4D custom mask logits_shared_prefix = self.model.forward( input_ids_shared_prefix, attention_mask=mask_shared_prefix, position_ids=position_ids_shared_prefix ).logits logits_shared_prefix_last = logits_shared_prefix[ 0, torch.where(position_ids_shared_prefix == position_ids_shared_prefix.max())[1], : ] # last three tokens decoded_shared_prefix = [self.tokenizer.decode(t) for t in logits_shared_prefix_last.argmax(dim=-1)] self.assertEqual(decoded, decoded_shared_prefix) def test_partial_stacked_causal_mask(self): # Same as the test above, but the input is passed in two groups. It tests that we can pass partial 4D attention masks ( input_ids, position_ids, input_ids_shared_prefix, mask_shared_prefix, position_ids_shared_prefix, ) = self.get_test_data() # regular batch logits = self.model.forward(input_ids, position_ids=position_ids).logits logits_last = logits[:, -1, :] # last tokens in each batch line decoded = [self.tokenizer.decode(t) for t in logits_last.argmax(dim=-1)] # 2 forward runs with custom 4D masks part_a = 3 # split point input_1a = input_ids_shared_prefix[:, :part_a] position_ids_1a = position_ids_shared_prefix[:, :part_a] mask_1a = mask_shared_prefix[:, :, :part_a, :part_a] outs_1a = self.model.forward(input_1a, attention_mask=mask_1a, position_ids=position_ids_1a) past_key_values_a = outs_1a["past_key_values"] # Case 1: we pass a 4D attention mask regarding the current sequence length (i.e. 
[..., seq_len, full_len]) input_1b = input_ids_shared_prefix[:, part_a:] position_ids_1b = position_ids_shared_prefix[:, part_a:] mask_1b = mask_shared_prefix[:, :, part_a:, :] outs_1b = self.model.forward( input_1b, attention_mask=mask_1b, position_ids=position_ids_1b, past_key_values=past_key_values_a, ) decoded_1b = [ self.tokenizer.decode(t) for t in outs_1b.logits.argmax(-1)[ 0, torch.where(position_ids_shared_prefix == position_ids_shared_prefix.max())[1] - part_a ] ] self.assertEqual(decoded, decoded_1b) def test_stacked_causal_mask_static_cache(self): """same as above but with StaticCache""" ( input_ids, position_ids, input_ids_shared_prefix, mask_shared_prefix, position_ids_shared_prefix, ) = self.get_test_data() # regular batch logits = self.model.forward(input_ids, position_ids=position_ids).logits logits_last = logits[:, -1, :] # last tokens in each batch line decoded = [self.tokenizer.decode(t) for t in logits_last.argmax(dim=-1)] # upgrade the model with StaticCache max_cache_len = 16 # note that max_cache_len is greater than the attention_mask.shape[-1] past_key_values = StaticCache( config=self.model.config, batch_size=1, max_cache_len=max_cache_len, device=torch_device, dtype=self.model.dtype, ) padded_attention_mask = torch.nn.functional.pad( input=mask_shared_prefix, pad=(0, max_cache_len - mask_shared_prefix.shape[-1]), mode="constant", value=torch.finfo(self.model_dtype).min, ) # single forward run with 4D custom mask logits_shared_prefix = self.model.forward( input_ids_shared_prefix, attention_mask=padded_attention_mask, position_ids=position_ids_shared_prefix, cache_position=torch.arange(input_ids_shared_prefix.shape[-1], device=torch_device), past_key_values=past_key_values, ).logits logits_shared_prefix_last = logits_shared_prefix[ 0, torch.where(position_ids_shared_prefix == position_ids_shared_prefix.max())[1], : ] # last three tokens decoded_shared_prefix = [self.tokenizer.decode(t) for t in logits_shared_prefix_last.argmax(dim=-1)] self.assertEqual(decoded, decoded_shared_prefix) def test_partial_stacked_causal_mask_static_cache(self): # Same as the test above, but the input is passed in two groups. 
It tests that we can pass partial 4D attention masks # (we pass a 4D attention mask shaped [..., seq_len, full_static_cache_len]) ( input_ids, position_ids, input_ids_shared_prefix, mask_shared_prefix, position_ids_shared_prefix, ) = self.get_test_data() # regular batch logits = self.model.forward(input_ids, position_ids=position_ids).logits logits_last = logits[:, -1, :] # last tokens in each batch line decoded = [self.tokenizer.decode(t) for t in logits_last.argmax(dim=-1)] # upgrade the model with StaticCache max_cache_len = 16 # note that max_cache_len is greater than the attention_mask.shape[-1] past_key_values = StaticCache( config=self.model.config, batch_size=1, max_cache_len=max_cache_len, device=torch_device, dtype=self.model.dtype, ) # forward run for the first part of input part_a = 3 # split point input_1a = input_ids_shared_prefix[:, :part_a] position_ids_1a = position_ids_shared_prefix[:, :part_a] mask_1a = mask_shared_prefix[:, :, :part_a, :part_a] padded_mask_1a = torch.nn.functional.pad( input=mask_1a, pad=(0, max_cache_len - mask_1a.shape[-1]), mode="constant", value=torch.finfo(self.model_dtype).min, ) _ = self.model.forward( input_1a, attention_mask=padded_mask_1a, position_ids=position_ids_1a, cache_position=torch.arange(part_a, device=torch_device), past_key_values=past_key_values, ) # forward run for the second part of input input_1b = input_ids_shared_prefix[:, part_a:] position_ids_1b = position_ids_shared_prefix[:, part_a:] mask_1b = mask_shared_prefix[:, :, part_a:, :] padded_mask_1b = torch.nn.functional.pad( input=mask_1b, pad=(0, max_cache_len - mask_1b.shape[-1]), mode="constant", value=0 ) outs_1b = self.model.forward( input_1b, attention_mask=padded_mask_1b, position_ids=position_ids_1b, cache_position=torch.arange( part_a, input_ids_shared_prefix.shape[-1], device=torch_device, ), past_key_values=past_key_values, ) decoded_1b = [ self.tokenizer.decode(t) for t in outs_1b.logits.argmax(-1)[ 0, torch.where(position_ids_shared_prefix == position_ids_shared_prefix.max())[1] - part_a ] ] self.assertEqual(decoded, decoded_1b)
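# --- Illustrative sketch, not part of the original test file ---
# The Mask4DTestHard cases above all rely on one trick: pack a shared prefix and
# several independent continuations into a single row, and hand the model a custom
# 4D additive attention mask so the branches cannot see each other. Below is a
# hypothetical, generalized version of the 12x12 mask hard-coded in get_test_data
# (the helper name and its arguments are assumptions for illustration, not
# transformers API):
import torch

def build_shared_prefix_mask(prefix_len, branch_len, n_branches, dtype=torch.float32):
    """Causal over the prefix; each branch attends to the full prefix plus its own
    tokens causally, and to nothing from the other branches. Returned in the
    additive [1, 1, L, L] form the model consumes (0 = attend, dtype-min = masked),
    mirroring the inversion done at the end of get_test_data."""
    total = prefix_len + n_branches * branch_len
    allowed = torch.zeros(total, total, dtype=torch.bool)
    allowed[:prefix_len, :prefix_len] = torch.tril(torch.ones(prefix_len, prefix_len, dtype=torch.bool))
    for b in range(n_branches):
        start = prefix_len + b * branch_len
        rows = slice(start, start + branch_len)
        allowed[rows, :prefix_len] = True  # every branch sees the shared prefix
        allowed[rows, rows] = torch.tril(torch.ones(branch_len, branch_len, dtype=torch.bool))
    mask = torch.zeros(total, total, dtype=dtype)
    mask.masked_fill_(~allowed, torch.finfo(dtype).min)  # invert bool mask to additive form
    return mask[None, None]

# prefix_len=3, branch_len=3, n_branches=3 reproduces the 12x12 pattern above
assert build_shared_prefix_mask(3, 3, 3).shape == (1, 1, 12, 12)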
transformers/tests/models/llama/test_modeling_llama.py/0
{ "file_path": "transformers/tests/models/llama/test_modeling_llama.py", "repo_id": "transformers", "token_count": 23303 }
425
# coding=utf-8 # Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from typing import Dict, List, Tuple from parameterized import parameterized from transformers import AutoTokenizer, Mamba2Config, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( Mamba2ForCausalLM, Mamba2Model, ) from transformers.models.mamba2.modeling_mamba2 import Mamba2Cache from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_0 else: is_torch_greater_or_equal_than_2_0 = False class Mamba2ModelTester: def __init__( self, parent, batch_size=14, num_heads=8, n_groups=8, state_size=2, head_dim=8, conv_kernel=4, chunk_size=8, seq_length=7, is_training=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, hidden_act="silu", hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, num_labels=3, num_choices=4, scope=None, tie_word_embeddings=False, ): self.parent = parent self.num_heads = num_heads self.n_groups = n_groups self.head_dim = head_dim self.state_size = state_size self.conv_kernel = conv_kernel self.chunk_size = chunk_size self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 self.pad_token_id = vocab_size - 1 self.tie_word_embeddings = tie_word_embeddings def get_large_model_config(self): return Mamba2Config.from_pretrained("mistralai/Mamba-Codestral-7B-v0.1", revision="refs/pr/9") def prepare_config_and_inputs( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config( gradient_checkpointing=gradient_checkpointing, ) return ( config, input_ids, None, sequence_labels, token_labels, choice_labels, ) def get_config(self, gradient_checkpointing=False): return Mamba2Config(
head_dim=self.head_dim, num_heads=self.num_heads, n_groups=self.n_groups, state_size=self.state_size, conv_kernel=self.conv_kernel, chunk_size=self.chunk_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, activation_function=self.hidden_act, n_positions=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, gradient_checkpointing=gradient_checkpointing, tie_word_embeddings=self.tie_word_embeddings, ) def prepare_config_and_inputs_for_common(self): ( config, input_ids, _, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() inputs_dict = {"input_ids": input_ids} return config, inputs_dict @unittest.skipIf( not is_torch_greater_or_equal_than_2_0, reason="See https://github.com/huggingface/transformers/pull/24204" ) @require_torch class Mamba2ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Mamba2Model, Mamba2ForCausalLM) if is_torch_available() else () all_generative_model_classes = (Mamba2ForCausalLM,) if is_torch_available() else () has_attentions = False # Mamba does not support attentions fx_compatible = False # FIXME let's try to support this @molbap test_torchscript = False # FIXME I think this should be doable @molbap @ArthurZucker test_missing_keys = False test_model_parallel = False test_pruning = False test_head_masking = False # Mamba does not have attention heads pipeline_model_mapping = ( {"feature-extraction": Mamba2Model, "text-generation": Mamba2ForCausalLM} if is_torch_available() else {} ) def setUp(self): self.model_tester = Mamba2ModelTester(self) self.config_tester = ConfigTester( self, config_class=Mamba2Config, n_embd=37, common_properties=["hidden_size", "num_hidden_layers"] ) def test_initialization(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config=config) for name, param in model.named_parameters(): if "D" in name: if param.requires_grad: # check if it's a ones like self.assertTrue(torch.allclose(param.data, torch.ones_like(param.data), atol=1e-5, rtol=1e-5)) @unittest.skip(reason="Mamba 2 weights are not tied") def test_tied_weights_keys(self): pass @unittest.skip(reason="To fix, Mamba 2 cache slicing is interacting with beam search") def test_beam_search_generate_dict_outputs_use_cache(self): pass @unittest.skip(reason="To fix, Mamba 2 cache slicing is interacting with beam search") def test_beam_sample_generate(self): pass @unittest.skip(reason="To fix, Mamba 2 cache slicing test case is an edge case") def test_generate_without_input_ids(self): pass @unittest.skip(reason="To fix, Mamba 2 cache slicing test case is an edge case") def test_greedy_generate_dict_outputs_use_cache(self): pass @unittest.skip(reason="Initialization of mamba2 fails this") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="A large mamba2 would be necessary (and costly) for that") def test_multi_gpu_data_parallel_forward(self): pass @unittest.skip(reason="To fix, Mamba 2 cache slicing test case is an edge case") def test_generate_from_inputs_embeds_decoder_only(self): pass def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output 
= model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, Mamba2Cache): # MODIFIED PART START recursive_check(tuple_object.conv_states, dict_object.conv_states) recursive_check(tuple_object.ssm_states, dict_object.ssm_states) elif isinstance(tuple_object, (List, Tuple)): # MODIFIED PART END for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose(tuple_object, dict_object, atol=1e-5), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object).any()}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object).any()}." ), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) @unittest.skip( reason="Mamba2 does not support generating with input embeddings (custom cache_position computation)" ) def test_inputs_embeds_matches_input_ids_with_generate(self): pass @require_torch @slow class Mamba2IntegrationTest(unittest.TestCase): def setUp(self): self.model_id = "mistralai/Mamba-Codestral-7B-v0.1" self.tokenizer = AutoTokenizer.from_pretrained( self.model_id, revision="refs/pr/9", from_slow=True, legacy=False ) self.prompt = ("[INST]Write a hello world program in C++.",) @parameterized.expand( [ (torch_device,), ] ) @slow @require_torch def test_simple_generate(self, device): """ Simple generate test to avoid regressions. Note: state-spaces (cuda) implementation and pure torch implementation have irreconcilable differences as of now, which will cause this test to fail in an environment with state-spaces installed. 
""" tokenizer = self.tokenizer tokenizer.pad_token_id = tokenizer.eos_token_id model = Mamba2ForCausalLM.from_pretrained(self.model_id, revision="refs/pr/9", torch_dtype=torch.bfloat16) model.to(device) input_ids = tokenizer("[INST]Write a hello world program in C++.[/INST]", return_tensors="pt")["input_ids"].to( device ) out = model.generate(input_ids, do_sample=False, use_cache=True, max_new_tokens=30) output_sentence = tokenizer.decode(out[0]) ground_truth_sentence = """<s>[INST]Write a hello world program in C++.[/INST] Sure, here is a simple "Hello, World!" program in C++:\n\n```cpp\n#include <iostream>\n\n""" self.assertEqual(output_sentence, ground_truth_sentence) @slow @require_torch_gpu def test_batched_equivalence_with_cache(self): """ Verifies that batched generation matches individual generation. Important because of the specific caching mechanism + statefulness of mamba model. Depending on precision and devices, differences can be observed from generation to generation. """ tokenizer = self.tokenizer prompt = [ "[INST]Write C#.[/INST]", "[INST]Write a hello world in C++.[/INST]", "[INST] Write a simple Fibonacci number computation function in Rust that does memoization, with comments, in safe Rust.[/INST]", ] model = Mamba2ForCausalLM.from_pretrained(self.model_id, revision="refs/pr/9", torch_dtype=torch.bfloat16).to( torch_device ) tokenizer.pad_token_id = tokenizer.eos_token_id # batched generation tokenized_prompts = tokenizer(prompt, return_tensors="pt", padding="longest").to(torch_device) batched_gen = model.generate(**tokenized_prompts, max_new_tokens=30, use_cache=True) batched_output = tokenizer.batch_decode(batched_gen, skip_special_tokens=True) # individual generation for index_gen, individual_prompt in enumerate(prompt): inputs = tokenizer(individual_prompt, return_tensors="pt", padding="longest").to(torch_device) individual_gen = model.generate(**inputs, max_new_tokens=30, use_cache=True) individual_output = tokenizer.batch_decode(individual_gen, skip_special_tokens=True)[0] self.assertEqual(individual_output[:100], batched_output[index_gen][:100]) @slow @require_torch_gpu def test_batched_equivalence_without_cache(self): """ Verifies that batched generation matches individual generation without cache. Important because of the specific caching mechanism + statefulness of mamba model. Depending on precision and devices, differences can be observed from generation to generation. 
""" tokenizer = self.tokenizer prompt = [ "[INST]Write C#.[/INST]", "[INST]Write a hello world in C++.[/INST]", "[INST] Write a simple Fibonacci number computation function in Rust that does memoization, with comments, in safe Rust.[/INST]", ] model = Mamba2ForCausalLM.from_pretrained(self.model_id, revision="refs/pr/9", torch_dtype=torch.bfloat16).to( torch_device ) tokenizer.pad_token_id = tokenizer.eos_token_id # batched generation tokenized_prompts = tokenizer(prompt, return_tensors="pt", padding="longest").to(torch_device) batched_gen = model.generate(**tokenized_prompts, max_new_tokens=30, use_cache=True) batched_output = tokenizer.batch_decode(batched_gen, skip_special_tokens=True) # individual generation for index_gen, individual_prompt in enumerate(prompt): inputs = tokenizer(individual_prompt, return_tensors="pt", padding="longest").to(torch_device) individual_gen = model.generate(**inputs, max_new_tokens=30, use_cache=True) individual_output = tokenizer.batch_decode(individual_gen, skip_special_tokens=True)[0] self.assertEqual(individual_output[:100], batched_output[index_gen][:100])
transformers/tests/models/mamba2/test_modeling_mamba2.py/0
{ "file_path": "transformers/tests/models/mamba2/test_modeling_mamba2.py", "repo_id": "transformers", "token_count": 7466 }
426
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch MaskFormer model.""" import copy import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import ( require_timm, require_torch, require_torch_accelerator, require_torch_fp16, require_torch_multi_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch import torch.nn.functional as F from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class MaskFormerModelTester: def __init__( self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 4, max_size=32 * 6, num_labels=4, mask_feature_size=32, num_hidden_layers=2, num_attention_heads=2, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_auxiliary_loss = use_auxiliary_loss self.num_queries = num_queries self.num_channels = num_channels self.min_size = min_size self.max_size = max_size self.num_labels = num_labels self.mask_feature_size = mask_feature_size # This is passed to the decoder config. 
We add it to the model tester here for testing self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to( torch_device ) pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device) mask_labels = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5 ).float() class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long() config = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def get_config(self): return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1], embed_dim=16, hidden_size=32, num_heads=[1, 1, 2, 2], ), backbone=None, decoder_config=DetrConfig( decoder_ffn_dim=64, decoder_layers=self.num_hidden_layers, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=64, encoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, num_queries=self.num_queries, d_model=self.mask_feature_size, ), mask_feature_size=self.mask_feature_size, fpn_feature_size=self.mask_feature_size, num_channels=self.num_channels, num_labels=self.num_labels, ) def prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def check_output_hidden_state(self, output, config): encoder_hidden_states = output.encoder_hidden_states pixel_decoder_hidden_states = output.pixel_decoder_hidden_states transformer_decoder_hidden_states = output.transformer_decoder_hidden_states self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths)) self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths)) self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers) def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False): with torch.no_grad(): model = MaskFormerModel(config=config) model.to(torch_device) model.eval() output = model(pixel_values=pixel_values, pixel_mask=pixel_mask) output = model(pixel_values, output_hidden_states=True) # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.mask_feature_size), ) # let's ensure the other two hidden states exist self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(output.encoder_last_hidden_state is not None) if output_hidden_states: self.check_output_hidden_state(output, config) def create_and_check_maskformer_instance_segmentation_head_model( self, config, pixel_values, pixel_mask, mask_labels, class_labels ): model = MaskFormerForInstanceSegmentation(config=config) model.to(torch_device) model.eval() def comm_check_on_output(result): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(result.encoder_last_hidden_state is not None) # okay, now we need to check the logits shape 
# due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) comm_check_on_output(result) result = model( pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels ) comm_check_on_output(result) self.parent.assertTrue(result.loss is not None) self.parent.assertEqual(result.loss.shape, torch.Size([])) @require_torch class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () pipeline_model_mapping = ( {"image-feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) is_encoder_decoder = False test_pruning = False test_head_masking = False test_missing_keys = False zero_init_hidden_state = True def setUp(self): self.model_tester = MaskFormerModelTester(self) self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if return_labels: if model_class in [MaskFormerForInstanceSegmentation]: inputs_dict["mask_labels"] = torch.zeros( ( self.model_tester.batch_size, self.model_tester.num_labels, self.model_tester.min_size, self.model_tester.max_size, ), dtype=torch.float32, device=torch_device, ) inputs_dict["class_labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_labels), dtype=torch.long, device=torch_device ) return inputs_dict def test_config(self): self.config_tester.run_common_tests() def test_maskformer_model(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False) def test_maskformer_instance_segmentation_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs) @unittest.skip(reason="MaskFormer does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="MaskFormer is not a generative model") def test_generate_without_input_ids(self): pass @unittest.skip(reason="MaskFormer does not use token embeddings") def test_resize_tokens_embeddings(self): pass @require_torch_multi_gpu @unittest.skip( reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def test_multi_gpu_data_parallel_forward(self): pass @slow def test_model_from_pretrained(self): for model_name in ["facebook/maskformer-swin-small-coco"]: model = MaskFormerModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_model_with_labels(self): size = (self.model_tester.min_size,) * 2 inputs = { "pixel_values": torch.randn((2, 3, *size), device=torch_device), "mask_labels": torch.randn((2, 10, *size), device=torch_device), "class_labels": torch.zeros(2, 10, 
device=torch_device).long(), } model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device) outputs = model(**inputs) self.assertTrue(outputs.loss is not None) def test_hidden_states_output(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # Check that output_attentions also works when set via the config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) # encoder_hidden_states, pixel_decoder_hidden_states, transformer_decoder_hidden_states, hidden_states added_hidden_states = 4 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) def test_retain_grad_hidden_states_attentions(self): # only MaskFormerForInstanceSegmentation has the loss model_class = self.all_model_classes[1] config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs() config.output_hidden_states = True config.output_attentions = True model = model_class(config) model.to(torch_device) model.train() outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels) encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() attentions = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(pixel_decoder_hidden_states.grad) self.assertIsNotNone(transformer_decoder_hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_forward_auxiliary_loss(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_auxiliary_loss = True config.output_auxiliary_logits = True config.output_hidden_states = True # only test for object detection and segmentation model for model_class in self.all_model_classes[1:]: model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, 
model_class, return_labels=True) outputs = model(**inputs) self.assertIsNotNone(outputs.auxiliary_logits) self.assertEqual(len(outputs.auxiliary_logits), self.model_tester.num_channels - 1) def test_batching_equivalence(self): def equivalence(tensor1, tensor2): return 1.0 - F.cosine_similarity(tensor1.float().flatten(), tensor2.float().flatten(), dim=0, eps=0).max() def recursive_check(batched_object, single_row_object, model_name, key): if isinstance(batched_object, (list, tuple)): for batched_object_value, single_row_object_value in zip(batched_object, single_row_object): recursive_check(batched_object_value, single_row_object_value, model_name, key) elif batched_object is None: return else: batched_row = batched_object[:1] self.assertFalse( torch.isnan(batched_row).any(), f"Batched output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(batched_row).any(), f"Batched output has `inf` in {model_name} for key={key}" ) self.assertFalse( torch.isnan(single_row_object).any(), f"Single row output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(single_row_object).any(), f"Single row output has `inf` in {model_name} for key={key}" ) self.assertTrue( (equivalence(batched_row, single_row_object)) <= 1e-03, msg=( f"Batched and Single row outputs are not equal in {model_name} for key={key}. " f"Difference={equivalence(batched_row, single_row_object)}." ), ) config, batched_input = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: config.output_hidden_states = True model_name = model_class.__name__ batched_input_prepared = self._prepare_for_class(batched_input, model_class) model = model_class(config).to(torch_device).eval() batch_size = self.model_tester.batch_size single_row_input = {} for key, value in batched_input_prepared.items(): single_batch_shape = value.shape[0] // batch_size single_row_input[key] = value[:single_batch_shape] with torch.no_grad(): model_batched_output = model(**batched_input_prepared) model_row_output = model(**single_row_input) for key in model_batched_output: # remove the first zero-init queries to decoder, otherwise cos_similarity = `nan` # no need to check all hidden_states, already checked separately each one if key == "transformer_decoder_hidden_states": model_batched_output[key] = model_batched_output[key][1:] model_row_output[key] = model_row_output[key][1:] elif key == "hidden_states": continue recursive_check(model_batched_output[key], model_row_output[key], model_name, key) @require_timm def test_backbone_selection(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() config.backbone_config = None config.backbone_kwargs = {"out_indices": [1, 2, 3]} config.use_pretrained_backbone = True # Load a timm backbone # We can't load transformer checkpoint with timm backbone, as we can't specify features_only and out_indices config.backbone = "resnet18" config.use_timm_backbone = True for model_class in self.all_model_classes: model = model_class(config).to(torch_device).eval() if model.__class__.__name__ == "MaskFormerModel": self.assertEqual(model.pixel_level_module.encoder.out_indices, [1, 2, 3]) elif model.__class__.__name__ == "MaskFormerForUniversalSegmentation": self.assertEqual(model.model.pixel_level_module.encoder.out_indices, [1, 2, 3]) # Load a HF backbone config.backbone = "microsoft/resnet-18" config.use_timm_backbone = False for model_class in self.all_model_classes: model = model_class(config).to(torch_device).eval() if 
model.__class__.__name__ == "MaskFormerModel": self.assertEqual(model.pixel_level_module.encoder.out_indices, [1, 2, 3]) elif model.__class__.__name__ == "MaskFormerForUniversalSegmentation": self.assertEqual(model.model.pixel_level_module.encoder.out_indices, [1, 2, 3]) TOLERANCE = 1e-4 # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_vision @slow class MaskFormerModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco") if is_vision_available() else None ) def test_inference_no_head(self): model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(image, return_tensors="pt").to(torch_device) inputs_shape = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(inputs_shape, (1, 3, 800, 1088)) with torch.no_grad(): outputs = model(**inputs) expected_slice_hidden_state = torch.tensor( [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(torch_device) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE ) ) expected_slice_hidden_state = torch.tensor( [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(torch_device) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE ) ) expected_slice_hidden_state = torch.tensor( [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(torch_device) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE ) ) def test_inference_instance_segmentation_head(self): model = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco") .to(torch_device) .eval() ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(image, return_tensors="pt").to(torch_device) inputs_shape = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(inputs_shape, (1, 3, 800, 1088)) with torch.no_grad(): outputs = model(**inputs) # masks_queries_logits masks_queries_logits = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), ) expected_slice = [ [-1.3737124, -1.7724937, -1.9364233], [-1.5977281, -1.9867939, -2.1523695], [-1.5795398, -1.9269832, -2.093942], ] expected_slice = torch.tensor(expected_slice).to(torch_device) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE)) # class_queries_logits class_queries_logits = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) expected_slice = torch.tensor( [ [1.6512e00, -5.2572e00, -3.3519e00], [3.6169e-02, -5.9025e00, -2.9313e00], [1.0766e-04, -7.7630e00, -5.1263e00], ] ).to(torch_device) 
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE)) def test_inference_instance_segmentation_head_resnet_backbone(self): model = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff") .to(torch_device) .eval() ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(image, return_tensors="pt").to(torch_device) inputs_shape = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(inputs_shape, (1, 3, 800, 1088)) with torch.no_grad(): outputs = model(**inputs) # masks_queries_logits masks_queries_logits = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), ) expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]] expected_slice = torch.tensor(expected_slice).to(torch_device) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE)) # class_queries_logits class_queries_logits = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) expected_slice = torch.tensor( [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE)) @require_torch_accelerator @require_torch_fp16 def test_inference_fp16(self): model = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff") .to(torch_device, dtype=torch.float16) .eval() ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(image, return_tensors="pt").to(torch_device, dtype=torch.float16) with torch.no_grad(): _ = model(**inputs) def test_with_segmentation_maps_and_loss(self): model = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco") .to(torch_device) .eval() ) image_processor = self.default_image_processor inputs = image_processor( [np.zeros((3, 400, 333)), np.zeros((3, 400, 333))], segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)], return_tensors="pt", ) inputs["pixel_values"] = inputs["pixel_values"].to(torch_device) inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]] inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]] with torch.no_grad(): outputs = model(**inputs) self.assertTrue(outputs.loss is not None)
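# --- Illustrative sketch, not part of the original test file ---
# test_batching_equivalence above scores closeness as 1 minus the cosine
# similarity of the flattened tensors rather than with an absolute tolerance,
# which tolerates the scale differences between the segmentation outputs.
# A standalone version of that metric (the function name is an assumption):
import torch
import torch.nn.functional as F

def cosine_distance(t1, t2):
    # 0.0 means the flattened tensors point in the same direction; the test
    # above accepts distances up to 1e-3
    return (1.0 - F.cosine_similarity(t1.float().flatten(), t2.float().flatten(), dim=0, eps=0)).max().item()

x = torch.randn(2, 4)
assert cosine_distance(x, x.clone()) <= 1e-3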
transformers/tests/models/maskformer/test_modeling_maskformer.py/0
{ "file_path": "transformers/tests/models/maskformer/test_modeling_maskformer.py", "repo_id": "transformers", "token_count": 12685 }
427
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "alibaba-damo/mgp-str-base" tokenizer_class = MgpstrTokenizer test_rust_tokenizer = False from_pretrained_kwargs = {} test_seq2seq = False def setUp(self): super().setUp() vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] # fmt: skip vocab_tokens = dict(zip(vocab, range(len(vocab)))) self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") def get_tokenizer(self, **kwargs): return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): input_text = "tester" output_text = "tester" return input_text, output_text @unittest.skip(reason="MGP-STR always lower cases letters.") def test_added_tokens_do_lower_case(self): pass def test_add_special_tokens(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): special_token = "[SPECIAL_TOKEN]" tokenizer.add_special_tokens({"cls_token": special_token}) encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False) self.assertEqual(len(encoded_special_token), 1) decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True) self.assertTrue(special_token not in decoded) def test_internal_consistency(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): input_text, output_text = self.get_input_output_texts(tokenizer) tokens = tokenizer.tokenize(input_text) ids = tokenizer.convert_tokens_to_ids(tokens) ids_2 = tokenizer.encode(input_text, add_special_tokens=False) self.assertListEqual(ids, ids_2) tokens_2 = tokenizer.convert_ids_to_tokens(ids) self.assertNotEqual(len(tokens_2), 0) text_2 = tokenizer.decode(ids) self.assertIsInstance(text_2, str) self.assertEqual(text_2.replace(" ", ""), output_text) @unittest.skip(reason="MGP-STR tokenizer only handles one sequence.") def test_maximum_encoding_length_pair_input(self): pass @unittest.skip(reason="inputs cannot be pretokenized in MgpstrTokenizer") def test_pretokenized_inputs(self): pass
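# --- Illustrative sketch, not part of the original test file ---
# setUp above materializes a character-level vocab as a JSON file that the
# tokenizer is later loaded from. The same write/read round trip can be
# exercised standalone (the temporary path is hypothetical):
import json
import os
import tempfile

vocab = ["[GO]", "[s]"] + list("0123456789abcdefghijklmnopqrstuvwxyz")
vocab_tokens = dict(zip(vocab, range(len(vocab))))  # token -> id, as in setUp
with tempfile.TemporaryDirectory() as tmpdir:
    vocab_file = os.path.join(tmpdir, "vocab.json")
    with open(vocab_file, "w", encoding="utf-8") as fp:
        fp.write(json.dumps(vocab_tokens) + "\n")
    with open(vocab_file, encoding="utf-8") as fp:
        assert json.load(fp) == vocab_tokens  # lossless round trip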
transformers/tests/models/mgp_str/test_tokenization_mgp_str.py/0
{ "file_path": "transformers/tests/models/mgp_str/test_tokenization_mgp_str.py", "repo_id": "transformers", "token_count": 1631 }
428
# coding=utf-8 # Copyright 2024 HuggingFace Inc. team. All rights reserved. # Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Nemotron model.""" import tempfile import unittest import pytest from parameterized import parameterized from transformers import NemotronConfig, is_torch_available from transformers.testing_utils import ( is_flaky, require_flash_attn, require_read_token, require_torch, require_torch_gpu, require_torch_sdpa, slow, torch_device, ) from ...models.gemma.test_modeling_gemma import GemmaModelTest, GemmaModelTester from ...test_configuration_common import ConfigTester if is_torch_available(): import torch from transformers import ( AutoTokenizer, NemotronForCausalLM, NemotronForQuestionAnswering, NemotronForSequenceClassification, NemotronForTokenClassification, NemotronModel, ) class NemotronModelTester(GemmaModelTester): if is_torch_available(): config_class = NemotronConfig model_class = NemotronModel for_causal_lm_class = NemotronForCausalLM for_sequence_class = NemotronForSequenceClassification for_token_class = NemotronForTokenClassification @require_torch class NemotronModelTest(GemmaModelTest): # Need to use `0.8` instead of `0.9` for `test_cpu_offload` # This is because we are hitting edge cases with the causal_mask buffer model_split_percents = [0.5, 0.7, 0.8] all_model_classes = ( ( NemotronModel, NemotronForCausalLM, NemotronForSequenceClassification, NemotronForQuestionAnswering, NemotronForTokenClassification, ) if is_torch_available() else () ) all_generative_model_classes = (NemotronForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": NemotronModel, "text-classification": NemotronForSequenceClassification, "text-generation": NemotronForCausalLM, "zero-shot": NemotronForSequenceClassification, "question-answering": NemotronForQuestionAnswering, "token-classification": NemotronForTokenClassification, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False fx_compatible = False # used in `test_torch_compile` _torch_compile_test_ckpt = "nvidia/nemotron-3-8b-base-4k-hf" def setUp(self): self.model_tester = NemotronModelTester(self) self.config_tester = ConfigTester(self, config_class=NemotronConfig, hidden_size=37) @require_torch_sdpa @slow @unittest.skip( reason="Due to custom causal mask, there is a slightly too big difference between eager and sdpa in bfloat16." 
) @parameterized.expand([("float16",), ("bfloat16",), ("float32",)]) def test_eager_matches_sdpa_inference(self, torch_dtype: str): pass @unittest.skip("Eager and SDPA do not produce the same outputs, thus this test fails") def test_model_outputs_equivalence(self, **kwargs): pass @require_torch_sdpa @require_torch_gpu @slow def test_sdpa_equivalence(self): for model_class in self.all_model_classes: if not model_class._supports_sdpa: self.skipTest(reason="Model does not support SDPA") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="sdpa" ) model_sdpa.to(torch_device) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.float16, attn_implementation="eager") model.to(torch_device) dummy_input = inputs_dict[model_class.main_input_name] dummy_input = dummy_input.to(torch_device) outputs = model(dummy_input, output_hidden_states=True) outputs_sdpa = model_sdpa(dummy_input, output_hidden_states=True) logits = outputs.hidden_states[-1] logits_sdpa = outputs_sdpa.hidden_states[-1] # nemotron sdpa needs a high tolerance assert torch.allclose(logits_sdpa, logits, atol=1e-2) @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @is_flaky() @slow def test_flash_attn_2_equivalence(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(reason="Model does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.float16, attn_implementation="eager") model.to(torch_device) dummy_input = inputs_dict[model_class.main_input_name] dummy_input = dummy_input.to(torch_device) outputs = model(dummy_input, output_hidden_states=True) outputs_fa = model_fa(dummy_input, output_hidden_states=True) logits = outputs.hidden_states[-1] logits_fa = outputs_fa.hidden_states[-1] # nemotron flash attention 2 needs a high tolerance assert torch.allclose(logits_fa, logits, atol=1e-2) @require_torch_gpu class NemotronIntegrationTest(unittest.TestCase): # This variable is used to determine which CUDA device are we using for our runners (A10 or T4) # Depending on the hardware we get different logits / generations cuda_compute_capability_major_version = None @classmethod def setUpClass(cls): if is_torch_available() and torch.cuda.is_available(): # 8 is for A100 / A10 and 7 for T4 cls.cuda_compute_capability_major_version = torch.cuda.get_device_capability()[0] @slow @require_read_token def test_nemotron_8b_generation_sdpa(self): text = ["What is the largest planet in solar system?"] EXPECTED_TEXT = [ "What is the largest planet in solar system?\nAnswer: Jupiter\n\nWhat is the answer", ] model_id = "thhaus/nemotron3-8b" model = NemotronForCausalLM.from_pretrained( model_id, torch_dtype=torch.float16, device_map="auto", attn_implementation="sdpa" ) tokenizer = AutoTokenizer.from_pretrained(model_id) inputs = tokenizer(text, return_tensors="pt").to(torch_device) output = model.generate(**inputs, do_sample=False) output_text = 
tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT, output_text) @slow @require_read_token def test_nemotron_8b_generation_eager(self): text = ["What is the largest planet in solar system?"] EXPECTED_TEXT = [ "What is the largest planet in solar system?\nAnswer: Jupiter\n\nWhat is the answer", ] model_id = "thhaus/nemotron3-8b" model = NemotronForCausalLM.from_pretrained( model_id, torch_dtype=torch.float16, device_map="auto", attn_implementation="eager" ) tokenizer = AutoTokenizer.from_pretrained(model_id) inputs = tokenizer(text, return_tensors="pt").to(torch_device) output = model.generate(**inputs, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT, output_text) @slow @require_read_token def test_nemotron_8b_generation_fa2(self): text = ["What is the largest planet in solar system?"] EXPECTED_TEXT = [ "What is the largest planet in solar system?\nAnswer: Jupiter\n\nWhat is the answer", ] model_id = "thhaus/nemotron3-8b" model = NemotronForCausalLM.from_pretrained( model_id, torch_dtype=torch.float16, device_map="auto", attn_implementation="flash_attention_2" ) tokenizer = AutoTokenizer.from_pretrained(model_id) inputs = tokenizer(text, return_tensors="pt").to(torch_device) output = model.generate(**inputs, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT, output_text)
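# --- Illustrative sketch, not part of the original test file ---
# The three generation tests above differ only in the `attn_implementation`
# argument; their shared skeleton could be factored out as below (the helper
# name is an assumption; the model id and prompt are taken from the tests):
def generate_greedy(impl):
    import torch
    from transformers import AutoTokenizer, NemotronForCausalLM

    model_id = "thhaus/nemotron3-8b"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = NemotronForCausalLM.from_pretrained(
        model_id, torch_dtype=torch.float16, device_map="auto", attn_implementation=impl
    )
    inputs = tokenizer(["What is the largest planet in solar system?"], return_tensors="pt").to(model.device)
    output = model.generate(**inputs, do_sample=False)
    return tokenizer.batch_decode(output, skip_special_tokens=True)

# the tests assert that "sdpa", "eager" and "flash_attention_2" all yield the
# same greedy completion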
transformers/tests/models/nemotron/test_modeling_nemotron.py/0
{ "file_path": "transformers/tests/models/nemotron/test_modeling_nemotron.py", "repo_id": "transformers", "token_count": 4139 }
429
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch PaliGemma model.""" import gc import unittest import requests from parameterized import parameterized from transformers import ( PaliGemmaConfig, PaliGemmaForConditionalGeneration, PaliGemmaProcessor, is_torch_available, is_vision_available, ) from transformers.testing_utils import ( require_read_token, require_torch, require_torch_sdpa, slow, torch_device, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor if is_torch_available(): import torch else: is_torch_greater_or_equal_than_2_0 = False if is_vision_available(): from PIL import Image class PaliGemmaVisionText2TextModelTester: def __init__( self, parent, ignore_index=-100, image_token_index=0, projector_hidden_act="gelu", seq_length=25, vision_feature_select_strategy="default", vision_feature_layer=-1, projection_dim=32, text_config={ "model_type": "gemma", "seq_length": 128, "is_training": True, # "use_input_mask": True, "use_token_type_ids": False, "use_labels": True, "vocab_size": 99, "hidden_size": 32, "num_hidden_layers": 2, "num_attention_heads": 4, "num_key_value_heads": 1, "head_dim": 8, "intermediate_size": 37, "hidden_activation": "gelu_pytorch_tanh", "hidden_dropout_prob": 0.1, "attention_probs_dropout_prob": 0.1, "max_position_embeddings": 512, "type_vocab_size": 16, "type_sequence_label_size": 2, "initializer_range": 0.02, "num_labels": 3, "num_choices": 4, "pad_token_id": 0, }, is_training=True, vision_config={ "use_labels": True, "image_size": 20, "patch_size": 5, "num_image_tokens": 4, "num_channels": 3, "is_training": True, "hidden_size": 32, "projection_dim": 32, "num_key_value_heads": 1, "num_hidden_layers": 2, "num_attention_heads": 4, "intermediate_size": 37, "dropout": 0.1, "attention_dropout": 0.1, "initializer_range": 0.02, }, use_cache=False, ): self.parent = parent self.ignore_index = ignore_index # `image_token_index` is set to 0 to pass "resize_embeddings" test, do not modify self.image_token_index = image_token_index self.projector_hidden_act = projector_hidden_act self.vision_feature_select_strategy = vision_feature_select_strategy self.vision_feature_layer = vision_feature_layer self.text_config = text_config self.vision_config = vision_config self.seq_length = seq_length self.projection_dim = projection_dim self.num_hidden_layers = text_config["num_hidden_layers"] self.vocab_size = text_config["vocab_size"] self.hidden_size = text_config["hidden_size"] self.num_attention_heads = text_config["num_attention_heads"] self.is_training = is_training self.batch_size = 3 self.num_channels = vision_config["num_channels"] self.image_size = vision_config["image_size"] self.encoder_seq_length = seq_length self.use_cache = use_cache def get_config(self): return PaliGemmaConfig( text_config=self.text_config, vision_config=self.vision_config, ignore_index=self.ignore_index, 
image_token_index=self.image_token_index, projector_hidden_act=self.projector_hidden_act, projection_dim=self.projection_dim, vision_feature_select_strategy=self.vision_feature_select_strategy, vision_feature_layer=self.vision_feature_layer, ) def prepare_config_and_inputs(self): pixel_values = floats_tensor( [ self.batch_size, self.vision_config["num_channels"], self.vision_config["image_size"], self.vision_config["image_size"], ] ) config = self.get_config() return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1 attention_mask = input_ids.ne(1).to(torch_device) # set the first 16 tokens to be image tokens, and ensure that no other tokens are image tokens # do not change this unless you modified image size or patch size input_ids = torch.where(input_ids == config.image_token_index, 2, input_ids) input_ids[:, :16] = config.image_token_index inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, "labels": input_ids, "token_type_ids": torch.zeros_like(input_ids), } return config, inputs_dict @require_torch class PaliGemmaForConditionalGenerationModelTest(ModelTesterMixin, unittest.TestCase): """ Model tester for `PaliGemmaForConditionalGeneration`. """ all_model_classes = (PaliGemmaForConditionalGeneration,) if is_torch_available() else () fx_compatible = False test_pruning = False test_torchscript = False test_head_masking = False def setUp(self): self.model_tester = PaliGemmaVisionText2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=PaliGemmaConfig, has_text_modality=False) # overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) input_ids = inputs["input_ids"] del inputs["input_ids"] del inputs["pixel_values"] wte = model.get_input_embeddings() inputs["inputs_embeds"] = wte(input_ids) with torch.no_grad(): model(**inputs) # overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs # while some other models require pixel_values to be present def test_inputs_embeds_matches_input_ids(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) input_ids = inputs["input_ids"] del inputs["input_ids"] del inputs["pixel_values"] inputs_embeds = model.get_input_embeddings()(input_ids) with torch.no_grad(): out_ids = model(input_ids=input_ids, **inputs)[0] out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0] self.assertTrue(torch.allclose(out_embeds, out_ids)) @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.") def test_cpu_offload(self): pass @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.") def test_disk_offload_bin(self): pass @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.") def test_disk_offload_safetensors(self): pass @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.") def test_model_parallelism(self): pass @require_torch_sdpa @slow @parameterized.expand([("float16",), ("bfloat16",), ("float32",)]) def test_eager_matches_sdpa_inference(self, torch_dtype: str): self.skipTest( "Due to custom causal mask, there is a slightly too big difference between eager and sdpa in bfloat16." ) @unittest.skip( reason="PaliGemmma's SigLip encoder uses the same initialization scheme as the Flax original implementation" ) def test_initialization(self): pass # TODO extend valid outputs to include this test @Molbap @unittest.skip(reason="PaliGemma has currently one output format.") def test_model_outputs_equivalence(self): pass # TODO fix the loss = nan in the testing configuration chosen @Molbap @unittest.skip(reason="Edge case giving loss nan values in testing configuration.") def test_determinism(self): pass @unittest.skip(reason="PaliGemma does not use feedforward chunking.") def test_feed_forward_chunking(self): pass @unittest.skip(reason="PaliGemma does not support low_cpu_mem_usage.") def test_save_load_low_cpu_mem_usage(self): pass @unittest.skip(reason="PaliGemma does not support low_cpu_mem_usage.") def test_save_load_low_cpu_mem_usage_checkpoints(self): pass @unittest.skip(reason="PaliGemma does not support low_cpu_mem_usage.") def test_save_load_low_cpu_mem_usage_no_safetensors(self): pass @slow @require_torch @require_read_token class PaliGemmaForConditionalGenerationIntegrationTest(unittest.TestCase): def setUp(self): self.processor = PaliGemmaProcessor.from_pretrained("google/paligemma-3b-pt-224") def tearDown(self): gc.collect() torch.cuda.empty_cache() @slow @require_read_token def test_small_model_integration_test(self): # Let' s make sure we test the preprocessing to replace what is used model_id = "google/paligemma-3b-pt-224" model = PaliGemmaForConditionalGeneration.from_pretrained(model_id) prompt = "" image_file = ( "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png" ) raw_image = Image.open(requests.get(image_file, stream=True).raw) inputs = self.processor(text=prompt, images=raw_image, return_tensors="pt") EXPECTED_INPUT_IDS = torch.tensor([[257152] * 256 + [2, 108]]) self.assertTrue(torch.equal(inputs["input_ids"], EXPECTED_INPUT_IDS)) output = model.generate(**inputs, max_new_tokens=20) EXPECTED_DECODED_TEXT = "\ncow on the beach" # fmt: skip self.assertEqual( self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_read_token def test_small_model_integration_test_paligemma_VQA(self): # Let' s make sure we test the preprocessing to replace what is used model_id = "google/paligemma-3b-pt-224" model = PaliGemmaForConditionalGeneration.from_pretrained(model_id) prompt = "answer en Where is the cow 
standing?" image_file = ( "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png" ) raw_image = Image.open(requests.get(image_file, stream=True).raw) inputs = self.processor(text=prompt, images=raw_image, return_tensors="pt").to(torch.float16) output = model.generate(**inputs, max_new_tokens=900, do_sample=False) EXPECTED_DECODED_TEXT = "answer en Where is the cow standing?\nbeach" # fmt: skip self.assertEqual( self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_read_token def test_small_model_integration_test_paligemma_empty_prompt(self): # Let' s make sure we test the preprocessing to replace what is used model_id = "google/paligemma-3b-pt-224" model = PaliGemmaForConditionalGeneration.from_pretrained(model_id) prompt = "" image_file = ( "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png" ) raw_image = Image.open(requests.get(image_file, stream=True).raw) inputs = self.processor(text=prompt, images=raw_image, return_tensors="pt").to(torch.float16) output = model.generate(**inputs, max_new_tokens=900, do_sample=False) EXPECTED_DECODED_TEXT = "\ncow on the beach" # fmt: skip self.assertEqual( self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_read_token def test_small_model_integration_test_paligemma_batched(self): # Let' s make sure we test the preprocessing to replace what is used model_id = "google/paligemma-3b-pt-224" model = PaliGemmaForConditionalGeneration.from_pretrained(model_id) prompts = [ "answer en Where is the cow standing?", "", ] image1 = Image.open( requests.get( "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png", stream=True, ).raw ) image2 = image1 inputs = self.processor(text=prompts, images=[image1, image2], return_tensors="pt", padding=True) output = model.generate(**inputs, max_new_tokens=20) EXPECTED_DECODED_TEXT = ["answer en Where is the cow standing?\nbeach", "\ncow on the beach"] # fmt: skip self.assertEqual(self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT) @slow @require_torch @require_read_token def test_small_model_integration_test_paligemma_batched_bf16(self): # Let' s make sure we test the preprocessing to replace what is used model_id = "google/paligemma-3b-pt-224" model = PaliGemmaForConditionalGeneration.from_pretrained( model_id, revision="bfloat16", torch_dtype=torch.bfloat16 ).to(torch_device) # The first batch is longer in terms of text, the second will be padded. 
prompts = [ "answer en Where is the cow standing?", "", ] image1 = Image.open( requests.get( "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png", stream=True, ).raw ) image2 = image1 inputs = ( self.processor(text=prompts, images=[image1, image2], return_tensors="pt", padding=True) .to(torch.bfloat16) .to(torch_device) ) output = model.generate(**inputs, max_new_tokens=20) EXPECTED_DECODED_TEXT = ["answer en Where is the cow standing?\nbeach", "\ncow on the beach"] # fmt: skip self.assertEqual(self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT) @slow @require_torch @require_read_token def test_small_model_integration_test_paligemma_batched_f16(self): # Let' s make sure we test the preprocessing to replace what is used model_id = "google/paligemma-3b-pt-224" model = PaliGemmaForConditionalGeneration.from_pretrained( model_id, revision="float16", torch_dtype=torch.float16 ).to(torch_device) # The first batch is longer in terms of text, the second will be padded. prompts = [ "answer en Where is the cow standing?", "", ] image1 = Image.open( requests.get( "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png", stream=True, ).raw ) image2 = image1 inputs = ( self.processor(text=prompts, images=[image1, image2], return_tensors="pt", padding=True) .to(torch.float16) .to(torch_device) ) output = model.generate(**inputs, max_new_tokens=20) EXPECTED_DECODED_TEXT = ["answer en Where is the cow standing?\nbeach", "\ncow on the beach"] # fmt: skip self.assertEqual(self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT) @slow @require_torch @require_read_token def test_integration_detection_bug(self): # this is a reproducer of https://github.com/huggingface/transformers/issues/31425 where not enough context # impacted negatively segmentation generations. 
model_id = "google/paligemma-3b-pt-224" model = PaliGemmaForConditionalGeneration.from_pretrained( model_id, revision="bfloat16", torch_dtype=torch.bfloat16 ).to(torch_device) prompt = ("detect shoe",) image = Image.open( requests.get( "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/shoe.png", stream=True, ).raw ) inputs = self.processor(text=prompt, images=image, return_tensors="pt").to(torch.bfloat16).to(torch_device) output = model.generate(**inputs, max_new_tokens=20) EXPECTED_DECODED_TEXT = "detect shoe\n<loc0051><loc0309><loc0708><loc0646> shoe" # fmt: skip self.assertEqual(self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT) @slow @require_read_token def test_paligemma_index_error_bug(self): # This is a reproducer of https://github.com/huggingface/transformers/pull/28032 and makes sure it does not happen anymore # Please refer to that PR, or specifically https://github.com/huggingface/transformers/pull/28032#issuecomment-1860650043 for # more details model_id = "google/paligemma-3b-pt-224" model = PaliGemmaForConditionalGeneration.from_pretrained(model_id) # Simulate a super long prompt prompt = "\n" * 200 image_file = ( "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png" ) raw_image = Image.open(requests.get(image_file, stream=True).raw) inputs = self.processor( text=prompt, images=raw_image, return_tensors="pt", ).to(torch.float16) # Make sure that `generate` works _ = model.generate(**inputs, max_new_tokens=20) @slow @require_torch @require_read_token def test_paligemma_finetuning_with_suffixes_bf16(self): # this is a supplementary test to ensure paligemma fine-tuning that relies on token_type_ids is robust to future changes model_id = "google/paligemma-3b-pt-224" model = PaliGemmaForConditionalGeneration.from_pretrained( model_id, revision="bfloat16", torch_dtype=torch.bfloat16 ).to(torch_device) # The first batch is longer in terms of text, the second will be padded. prompts = [ "answer en Where is the cow standing?", "", ] suffixes = ["beach", "cow standing on the beach"] image1 = Image.open( requests.get( "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png", stream=True, ).raw ) image2 = image1 inputs = ( self.processor(text=prompts, suffix=suffixes, images=[image1, image2], return_tensors="pt", padding=True) .to(torch.bfloat16) .to(torch_device) ) expected_labels = torch.tensor( [266 * [-100] + [54901, 1], 262 * [-100] + [14706, 9980, 611, 573, 8318, 1]] ).to(torch_device) assert torch.equal(inputs["labels"], expected_labels) expected_token_type_ids = torch.tensor([266 * [0] + 2 * [1], 262 * [0] + 6 * [1]]).to(torch_device) assert torch.equal(inputs["token_type_ids"], expected_token_type_ids) output = model(**inputs) # check that loss does not error out _ = output.loss
transformers/tests/models/paligemma/test_modeling_paligemma.py/0
{ "file_path": "transformers/tests/models/paligemma/test_modeling_paligemma.py", "repo_id": "transformers", "token_count": 9750 }
430
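The PaliGemma tester above hard-codes an image-token prefix: the first 16 positions of every sequence are overwritten with `image_token_index` before the text tokens. Below is a minimal sketch of that layout using only torch; the sizes and the `ne(1)` mask construction mirror the tester's defaults, while everything else (variable names, the prints) is illustrative and not part of the test file.

# Illustrative sketch of the tester's image-token prefix layout (not part of the test file above).
import torch

batch_size, seq_length = 3, 25
image_token_index = 0  # matches the tester's default
vocab_size = 99

# random text ids in [1, vocab_size), so the image token id never appears by accident
input_ids = torch.randint(1, vocab_size, (batch_size, seq_length))
input_ids[:, :16] = image_token_index  # reserve the prefix for image embeddings

attention_mask = input_ids.ne(1).long()  # same construction as the tester

assert (input_ids[:, :16] == image_token_index).all()
assert not (input_ids[:, 16:] == image_token_index).any()
print(input_ids.shape, attention_mask.shape)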
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Persimmon model.""" import gc import unittest from parameterized import parameterized from transformers import PersimmonConfig, is_torch_available, set_seed from transformers.testing_utils import ( backend_empty_cache, require_bitsandbytes, require_torch, require_torch_accelerator, require_torch_fp16, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( AutoTokenizer, PersimmonForCausalLM, PersimmonForSequenceClassification, PersimmonForTokenClassification, PersimmonModel, ) from transformers.models.persimmon.modeling_persimmon import ( PersimmonDynamicNTKScalingRotaryEmbedding, PersimmonLinearScalingRotaryEmbedding, PersimmonRotaryEmbedding, ) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester with Llama->Persimmon class PersimmonModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = torch.tril(torch.ones(self.batch_size, self.seq_length)).to(torch_device) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], 
self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return PersimmonConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = PersimmonModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = PersimmonModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = PersimmonForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = PersimmonForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next tokens and extend to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next attention mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past =
model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class PersimmonModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (PersimmonModel, PersimmonForCausalLM, PersimmonForSequenceClassification, PersimmonForTokenClassification) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": PersimmonModel, "text-classification": PersimmonForSequenceClassification, "token-classification": PersimmonForTokenClassification, # TODO (ydshieh): check why these two fail. Fix them or skip them in a better way. # "text-generation": PersimmonForCausalLM, # "zero-shot": PersimmonForSequenceClassification, } if is_torch_available() else {} ) all_generative_model_classes = (PersimmonForCausalLM,) if is_torch_available() else () test_headmasking = False test_pruning = False # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.setUp with Llama->Persimmon def setUp(self): self.model_tester = PersimmonModelTester(self) self.config_tester = ConfigTester(self, config_class=PersimmonConfig, hidden_size=37) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_config def test_config(self): self.config_tester.run_common_tests() # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_model def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_model_various_embeddings def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_sequence_classification_model with Llama->Persimmon,llama->persimmon def test_persimmon_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = PersimmonForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, 
(self.model_tester.batch_size, self.model_tester.num_labels)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_sequence_classification_model_for_single_label with Llama->Persimmon,llama->persimmon def test_persimmon_sequence_classification_model_for_single_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "single_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = PersimmonForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_sequence_classification_model_for_multi_label with Llama->Persimmon,llama->persimmon def test_persimmon_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) model = PersimmonForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_token_classification_model with Llama->Persimmon,llama->persimmon def test_persimmon_token_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) token_labels = ids_tensor([self.model_tester.batch_size, self.model_tester.seq_length], config.num_labels) model = PersimmonForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=token_labels) self.assertEqual( result.logits.shape, (self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.num_labels), ) @unittest.skip(reason="Persimmon buffers include complex numbers, which breaks this test") # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_save_load_fast_init_from_base def test_save_load_fast_init_from_base(self): pass @parameterized.expand([("linear",), ("dynamic",)]) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_model_rope_scaling_from_config with Llama->Persimmon def test_model_rope_scaling_from_config(self, scaling_type): config, _ = self.model_tester.prepare_config_and_inputs_for_common() short_input = ids_tensor([1, 10], config.vocab_size) long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights original_model = PersimmonModel(config) original_model.to(torch_device) original_model.eval() original_short_output = original_model(short_input).last_hidden_state original_long_output = 
original_model(long_input).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights config.rope_scaling = {"type": scaling_type, "factor": 10.0} scaled_model = PersimmonModel(config) scaled_model.to(torch_device) scaled_model.eval() scaled_short_output = scaled_model(short_input).last_hidden_state scaled_long_output = scaled_model(long_input).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) else: self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5)) # Copied from tests.models.falcon.test_modeling_falcon.FalconModelTest.test_model_rope_scaling with Falcon->Persimmon def test_model_rope_scaling(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() hidden_size = config.hidden_size num_heads = config.num_attention_heads head_dim = hidden_size // num_heads scaling_factor = 10 short_input_length = 10 long_input_length = int(config.max_position_embeddings * 1.5) # Inputs x = torch.randn(1, dtype=torch.float32, device=torch_device) # used exclusively to get the dtype and the device # Sanity check original RoPE original_rope = PersimmonRotaryEmbedding( head_dim, max_position_embeddings=config.max_position_embeddings, base=config.rope_theta, ).to(torch_device) original_cos_short, original_sin_short = original_rope(x, short_input_length) original_cos_long, original_sin_long = original_rope(x, long_input_length) torch.testing.assert_close(original_cos_short, original_cos_long[:short_input_length, :]) torch.testing.assert_close(original_sin_short, original_sin_long[:short_input_length, :]) # Sanity check linear RoPE scaling # New position "x" should match original position with index "x/scaling_factor" linear_scaling_rope = PersimmonLinearScalingRotaryEmbedding( head_dim, max_position_embeddings=config.max_position_embeddings, base=config.rope_theta, scaling_factor=scaling_factor, ).to(torch_device) linear_cos_short, linear_sin_short = linear_scaling_rope(x, short_input_length) linear_cos_long, linear_sin_long = linear_scaling_rope(x, long_input_length) torch.testing.assert_close(linear_cos_short, linear_cos_long[:short_input_length, :]) torch.testing.assert_close(linear_sin_short, linear_sin_long[:short_input_length, :]) for new_position in range(0, long_input_length, scaling_factor): original_position = int(new_position // scaling_factor) torch.testing.assert_close(linear_cos_long[new_position, :], original_cos_long[original_position, :]) torch.testing.assert_close(linear_sin_long[new_position, :], original_sin_long[original_position, :]) # Sanity check Dynamic NTK RoPE scaling # Scaling should only be observed after a long input is fed.
We can observe that the frequencies increase # with scaling_factor (or that `inv_freq` decreases) ntk_scaling_rope = PersimmonDynamicNTKScalingRotaryEmbedding( head_dim, max_position_embeddings=config.max_position_embeddings, base=config.rope_theta, scaling_factor=scaling_factor, ).to(torch_device) ntk_cos_short, ntk_sin_short = ntk_scaling_rope(x, short_input_length) ntk_cos_long, ntk_sin_long = ntk_scaling_rope(x, long_input_length) torch.testing.assert_close(ntk_cos_short, original_cos_short) torch.testing.assert_close(ntk_sin_short, original_sin_short) with self.assertRaises(AssertionError): torch.testing.assert_close(ntk_cos_long, original_cos_long) with self.assertRaises(AssertionError): torch.testing.assert_close(ntk_sin_long, original_sin_long) self.assertTrue((ntk_scaling_rope.inv_freq <= original_rope.inv_freq).all()) @require_torch class PersimmonIntegrationTest(unittest.TestCase): @slow @require_torch_accelerator @require_bitsandbytes def test_model_8b_chat_logits(self): input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338] model = PersimmonForCausalLM.from_pretrained( "adept/persimmon-8b-chat", load_in_8bit=True, device_map={"": 0}, torch_dtype=torch.float16 ) out = model(torch.tensor([input_ids], device=torch_device)).logits EXPECTED_MEAN = torch.tensor( [[-11.4726, -11.1495, -11.2694, -11.2223, -10.9452, -11.0663, -11.0031, -11.1028]] ) # change dtype to `torch.float32` before calling `mean` to avoid `nan` values torch.testing.assert_close(out.cpu().to(torch.float32).mean(-1), EXPECTED_MEAN, atol=1e-4, rtol=1e-4) # fmt: off EXPECTED_SLICE = torch.tensor( [-16.9062, -16.9062, -16.9062, -16.9062, -16.8906, -16.9062, -16.9531, -16.9062, -16.9062, -16.9062, -16.9531, -16.9062, -16.9531, -16.9062, -16.9062, -16.9062, -16.9062, -16.9062, -16.9531, -16.9062, -16.9062, -16.9062, -16.9062, -16.9062, -16.9062, -16.9531, -16.9062, -16.9531, -16.9062, -16.9062], dtype=torch.float16 ) # fmt: on torch.testing.assert_close(out.cpu()[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5) backend_empty_cache(torch_device) del model gc.collect() @slow @require_torch_accelerator @require_torch_fp16 @require_bitsandbytes def test_model_8b_chat_greedy_generation(self): EXPECTED_TEXT_COMPLETION = """human: Simply put, the theory of relativity states that?\n\nadept: The theory of relativity states that the laws of physics are the same for all observers, regardless of their relative motion.""" prompt = "human: Simply put, the theory of relativity states that?\n\nadept:" tokenizer = AutoTokenizer.from_pretrained("adept/persimmon-8b-chat", use_fast=False) input_ids = tokenizer.encode(prompt, return_tensors="pt").to(torch_device) model = PersimmonForCausalLM.from_pretrained( "adept/persimmon-8b-chat", load_in_8bit=True, device_map={"": 0}, torch_dtype=torch.float16 ) # greedy generation outputs generated_ids = model.generate(input_ids, max_new_tokens=64) text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) backend_empty_cache(torch_device) del model gc.collect()
transformers/tests/models/persimmon/test_modeling_persimmon.py/0
{ "file_path": "transformers/tests/models/persimmon/test_modeling_persimmon.py", "repo_id": "transformers", "token_count": 10480 }
431
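The rope-scaling test in the Persimmon file above checks that a linearly scaled rotary embedding at position p reproduces the unscaled embedding at position p / scaling_factor. Here is a self-contained sketch of why that property holds, with plain torch and illustrative dimensions; the `rope_angles` helper is a hypothetical stand-in, not a transformers API.

# Illustrative sketch of the linear RoPE scaling property (not part of the test file above).
import torch

head_dim, base, factor = 8, 10000.0, 10
# standard rotary inverse frequencies for the even dimensions
inv_freq = 1.0 / (base ** (torch.arange(0, head_dim, 2).float() / head_dim))

def rope_angles(positions):
    # angle matrix of shape (num_positions, head_dim // 2)
    return torch.outer(positions, inv_freq)

positions = torch.arange(60, dtype=torch.float32)
original = rope_angles(positions)
scaled = rope_angles(positions / factor)  # linear scaling divides the position ids

# at every multiple of the factor, the scaled angles equal the unscaled ones
for p in range(0, 60, factor):
    torch.testing.assert_close(scaled[p], original[p // factor])
print("linear RoPE scaling property holds")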
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch PoolFormer model.""" import unittest from transformers import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MODEL_MAPPING, PoolFormerConfig, PoolFormerForImageClassification, PoolFormerModel if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class PoolFormerConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "hidden_sizes")) self.parent.assertTrue(hasattr(config, "num_encoder_blocks")) class PoolFormerModelTester: def __init__( self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128], downsampling_rates=[1, 4, 8, 16], is_training=False, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.num_encoder_blocks = num_encoder_blocks self.sr_ratios = sr_ratios self.depths = depths self.hidden_sizes = hidden_sizes self.downsampling_rates = downsampling_rates self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = PoolFormerConfig( image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, initializer_range=self.initializer_range, ) return config, pixel_values, labels def create_and_check_model(self, config, pixel_values, labels): model = PoolFormerModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) expected_height = expected_width = self.image_size // 32.0 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = 
config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class PoolFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (PoolFormerModel, PoolFormerForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( {"image-feature-extraction": PoolFormerModel, "image-classification": PoolFormerForImageClassification} if is_torch_available() else {} ) test_head_masking = False test_pruning = False test_resize_embeddings = False test_torchscript = False has_attentions = False def setUp(self): self.model_tester = PoolFormerModelTester(self) self.config_tester = PoolFormerConfigTester(self, config_class=PoolFormerConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="PoolFormer does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="PoolFormer does not have get_input_embeddings method and get_output_embeddings methods") def test_model_get_set_embeddings(self): pass def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_encoder_blocks self.assertEqual(len(hidden_states), expected_num_layers) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:]), [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if model_class in get_values(MODEL_MAPPING): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() @slow def test_model_from_pretrained(self): model_name = "sail/poolformer_s12" model = PoolFormerModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch class PoolFormerModelIntegrationTest(unittest.TestCase): @slow def test_inference_image_classification_head(self): image_processor = PoolFormerImageProcessor() model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12").to(torch_device) inputs = image_processor(images=prepare_img(), return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 
1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-0.6113, 0.1685, -0.0492]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
transformers/tests/models/poolformer/test_modeling_poolformer.py/0
{ "file_path": "transformers/tests/models/poolformer/test_modeling_poolformer.py", "repo_id": "transformers", "token_count": 3643 }
432
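In the PoolFormer tests above, the first hidden state is checked at image_size // 4 per side and the final feature map at image_size // 32. That follows from the per-stage strides: a stride-4 patch embedding followed by three stride-2 downsampling steps. The arithmetic sketch below makes this explicit; the stride values are the ones implied by the test's expectations, stated here as an assumption rather than read from the config.

# Illustrative arithmetic for the expected PoolFormer feature-map sizes (not part of the test file above).
image_size = 64
stage_strides = [4, 2, 2, 2]  # assumed: patch embedding stride 4, then three stride-2 stages

size = image_size
for i, stride in enumerate(stage_strides):
    size //= stride
    print(f"stage {i}: {size}x{size}")

# final stage matches the image_size // 32 used by the integration check
assert size == image_size // 32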
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch SeamlessM4Tv2 model.""" import copy import tempfile import unittest from transformers import SeamlessM4Tv2Config, is_speech_available, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from transformers.trainer_utils import set_seed from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) if is_torch_available(): import torch from transformers import ( SeamlessM4Tv2ForSpeechToSpeech, SeamlessM4Tv2ForSpeechToText, SeamlessM4Tv2ForTextToSpeech, SeamlessM4Tv2ForTextToText, SeamlessM4Tv2Model, ) if is_speech_available(): from transformers import SeamlessM4TProcessor class SeamlessM4Tv2ModelTester: def __init__( self, parent, input_modality="speech", batch_size=2, seq_length=4, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, max_new_tokens=None, num_labels=3, num_choices=4, scope=None, vocab_size=20, t2u_vocab_size=20, hidden_size=6, num_hidden_layers=2, intermediate_size=6, max_position_embeddings=256, encoder_layers=2, decoder_layers=2, encoder_ffn_dim=6, decoder_ffn_dim=6, t2u_encoder_layers=2, t2u_decoder_layers=2, t2u_encoder_ffn_dim=6, t2u_decoder_ffn_dim=6, num_heads=2, vocoder_num_spkrs=5, vocoder_num_langs=5, upsample_initial_channel=32, unit_embed_dim=25, spkr_embed_dim=6, lang_embed_dim=6, num_conv_pos_embeddings=8, unit_hifi_gan_vocab_size=20, t2u_num_langs=0, t2u_offset_tgt_lang=0, vocoder_offset=0, t2u_variance_predictor_hidden_dim=4, char_vocab_size=4, left_max_position_embeddings=2, right_max_position_embeddings=1, speech_encoder_chunk_size=2, speech_encoder_left_chunk_num=1, ): self.parent = parent self.input_modality = input_modality self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.vocab_size = vocab_size self.t2u_vocab_size = t2u_vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.max_position_embeddings = max_position_embeddings self.encoder_layers = encoder_layers self.decoder_layers = decoder_layers self.encoder_ffn_dim = encoder_ffn_dim self.decoder_ffn_dim = decoder_ffn_dim self.t2u_encoder_layers = t2u_encoder_layers self.t2u_decoder_layers = t2u_decoder_layers 
self.t2u_encoder_ffn_dim = t2u_encoder_ffn_dim self.t2u_decoder_ffn_dim = t2u_decoder_ffn_dim self.num_heads = num_heads self.num_attention_heads = num_heads self.vocoder_num_spkrs = vocoder_num_spkrs self.vocoder_num_langs = vocoder_num_langs self.upsample_initial_channel = upsample_initial_channel self.unit_embed_dim = unit_embed_dim self.spkr_embed_dim = spkr_embed_dim self.num_conv_pos_embeddings = num_conv_pos_embeddings self.lang_embed_dim = lang_embed_dim self.max_new_tokens = max_new_tokens self.unit_hifi_gan_vocab_size = unit_hifi_gan_vocab_size self.t2u_num_langs = t2u_num_langs self.t2u_offset_tgt_lang = t2u_offset_tgt_lang self.vocoder_offset = vocoder_offset self.t2u_variance_predictor_hidden_dim = t2u_variance_predictor_hidden_dim self.char_vocab_size = char_vocab_size self.left_max_position_embeddings = left_max_position_embeddings self.right_max_position_embeddings = right_max_position_embeddings self.speech_encoder_chunk_size = speech_encoder_chunk_size self.speech_encoder_left_chunk_num = speech_encoder_left_chunk_num def prepare_config_and_inputs(self): if self.input_modality == "text": inputs = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1) else: inputs = ids_tensor([self.batch_size, self.seq_length, 160], self.vocab_size - 1).float() input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1) lm_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) config = self.get_config() return config, inputs, decoder_input_ids, input_mask, lm_labels def get_config(self): return SeamlessM4Tv2Config( hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, vocab_size=self.vocab_size, t2u_vocab_size=self.t2u_vocab_size, hidden_size=self.hidden_size, speech_encoder_layers=self.num_heads, speech_encoder_intermediate_size=self.intermediate_size, max_position_embeddings=self.max_position_embeddings, encoder_layers=self.encoder_layers, decoder_layers=self.decoder_layers, encoder_ffn_dim=self.encoder_ffn_dim, decoder_ffn_dim=self.decoder_ffn_dim, t2u_encoder_layers=self.t2u_encoder_layers, t2u_decoder_layers=self.t2u_decoder_layers, t2u_encoder_ffn_dim=self.t2u_encoder_ffn_dim, t2u_decoder_ffn_dim=self.t2u_decoder_ffn_dim, num_attention_heads=self.num_heads, encoder_attention_heads=self.num_heads, decoder_attention_heads=self.num_heads, t2u_encoder_attention_heads=self.num_heads, t2u_decoder_attention_heads=self.num_heads, speech_encoder_attention_heads=self.num_heads, unit_hifigan_vocab_vise=self.t2u_vocab_size, vocoder_num_spkrs=self.vocoder_num_spkrs, vocoder_num_langs=self.vocoder_num_langs, upsample_initial_channel=self.upsample_initial_channel, unit_embed_dim=self.unit_embed_dim, spkr_embed_dim=self.spkr_embed_dim, num_conv_pos_embeddings=self.num_conv_pos_embeddings, lang_embed_dim=self.lang_embed_dim, max_new_tokens=self.max_new_tokens, unit_hifi_gan_vocab_size=self.unit_hifi_gan_vocab_size, t2u_num_langs=self.t2u_num_langs, t2u_offset_tgt_lang=self.t2u_offset_tgt_lang, vocoder_offset=self.vocoder_offset, t2u_variance_predictor_embed_dim=self.hidden_size, t2u_variance_predictor_hidden_dim=self.t2u_variance_predictor_hidden_dim, char_vocab_size=self.char_vocab_size, left_max_position_embeddings=self.left_max_position_embeddings, 
right_max_position_embeddings=self.right_max_position_embeddings, speech_encoder_chunk_size=self.speech_encoder_chunk_size, speech_encoder_left_chunk_num=self.speech_encoder_left_chunk_num, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, decoder_input_ids, input_mask, lm_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, decoder_input_ids, input_mask, lm_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model(self, config, input_ids, decoder_input_ids, input_mask, labels): model = SeamlessM4Tv2Model(config=config) model.to(torch_device) model.eval() if self.input_modality == "text": result = model(input_ids=input_ids, attention_mask=input_mask, decoder_input_ids=decoder_input_ids) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) else: result = model(input_features=input_ids, attention_mask=input_mask, decoder_input_ids=decoder_input_ids) result = model(input_features=input_ids, decoder_input_ids=decoder_input_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) decoder_output = result.logits decoder_past = result.past_key_values encoder_output = result.encoder_last_hidden_state if self.input_modality == "text": seq_length = self.seq_length else: # if speech, expected length has been subsampled. seq_length = model._compute_sub_sample_lengths_from_attention_mask(input_mask).max().item() self.parent.assertEqual(encoder_output.size(), (self.batch_size, seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size(), (self.batch_size, decoder_input_ids.shape[1], self.vocab_size)) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(decoder_past), config.decoder_layers) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0]), 4) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, decoder_input_ids, input_mask, lm_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True model = SeamlessM4Tv2Model(config=config) model.to(torch_device) model.eval() # make sure no pad token in decoder_input_ids decoder_input_ids = torch.clamp(decoder_input_ids, config.pad_token_id + 1) # first forward pass outputs = model( input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=input_mask, use_cache=True ) past_key_values = outputs.past_key_values # create hypothetical multiple next tokens and extend to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next attention mask next_input_ids = torch.cat([decoder_input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( input_ids, decoder_input_ids=next_input_ids, decoder_attention_mask=next_attention_mask, output_hidden_states=True, ) output_from_no_past = output_from_no_past["decoder_hidden_states"][0] output_from_past = model( input_ids, decoder_input_ids=next_tokens, decoder_attention_mask=next_attention_mask,
past_key_values=past_key_values, output_hidden_states=True, )["decoder_hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, input_mask, lm_labels, ) = config_and_inputs input_name = "input_ids" if self.input_modality == "text" else "input_features" inputs_dict = { input_name: input_ids, "attention_mask": input_mask, "decoder_input_ids": decoder_input_ids, "labels": lm_labels, } return config, inputs_dict @require_torch class SeamlessM4Tv2ModelWithSpeechInputTest(ModelTesterMixin, unittest.TestCase): is_encoder_decoder = True fx_compatible = False test_missing_keys = False test_pruning = False test_model_parallel = False test_resize_embeddings = False test_headmasking = False test_torchscript = False all_model_classes = ( ( SeamlessM4Tv2Model, SeamlessM4Tv2ForSpeechToSpeech, SeamlessM4Tv2ForSpeechToText, ) if is_torch_available() else () ) all_generative_model_classes = (SeamlessM4Tv2ForSpeechToText,) if is_torch_available() else () input_name = "input_features" def setUp(self): self.model_tester = SeamlessM4Tv2ModelTester(self, input_modality="speech") self.config_tester = ConfigTester(self, config_class=SeamlessM4Tv2Config) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "facebook/seamless-m4t-v2-large" model = SeamlessM4Tv2Model.from_pretrained(model_name) self.assertIsNotNone(model) def _get_input_ids_and_config(self, batch_size=2): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict[self.input_name] # cut to half length & take max batch_size 3 sequence_length = input_ids.shape[-1] // 2 input_ids = input_ids[:batch_size, :sequence_length] # generate max 3 tokens max_length = input_ids.shape[-1] + 3 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` if isinstance(config.eos_token_id, int): config.eos_token_id = [config.eos_token_id] config.pad_token_id = config.eos_token_id[0] attention_mask = torch.ones(input_ids.shape[:2], dtype=torch.long)[:batch_size, :sequence_length] return config, input_ids.float(), attention_mask, max_length @staticmethod def _get_encoder_outputs( model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1 ): encoder = model.get_encoder() encoder_outputs = encoder( input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave( num_interleave, dim=0 ) generation_config = copy.deepcopy(model.generation_config) model._prepare_special_tokens(generation_config) input_ids = ( torch.zeros(input_ids.shape[:2], dtype=torch.int64, layout=input_ids.layout, 
device=input_ids.device) + generation_config.decoder_start_token_id ) attention_mask = None return encoder_outputs, input_ids, attention_mask def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "pos_bias_v", "pos_bias_u", "pointwise_conv1", "pointwise_conv2", "feature_projection.projection.weight", "feature_projection.projection.bias", "objective.weight", "adapter", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="SeamlessM4Tv2SpeechEncoder doesn't have an embedding layer") def test_inputs_embeds(self): pass @unittest.skip(reason="SeamlessM4Tv2SpeechEncoder doesn't have an embedding layer") def test_inputs_embeds_matches_input_ids(self): pass @unittest.skip( reason="Missing keys are expected when using SeamlessM4Tv2ForXXX.from_pretrained on a checkpoint saved by SeamlessM4Tv2Model.save_pretrained." ) def test_model_weights_reload_no_missing_tied_weights(self): pass @unittest.skip( reason="SeamlessM4Tv2Model is the base class but actually has a bigger architecture than the SeamlessM4Tv2 task-specific models."
) def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="SeamlessM4Tv2Model can take input_ids or input_features") def test_forward_signature(self): pass @unittest.skip(reason="SeamlessM4Tv2 has no base model") def test_save_load_fast_init_from_base(self): pass @unittest.skip( reason="This architecture does not seem to compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture does not seem to compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture does not seem to compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip( reason="This architecture has tied weights by default and there is no way to remove them, check: https://github.com/huggingface/transformers/pull/31771#issuecomment-2210915245" ) def test_load_save_without_tied_weights(self): pass def test_attention_outputs(self): # the expected encoder length is subsampled, so this test needs a few tweaks if not self.has_attentions: self.skipTest(reason="Model does not output attentions") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) # no more chunk_length test for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also works using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to the beginning if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]),
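# decoder self-attention weights are expected to have shape (num_heads, query_len, key_len)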
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) sub_sampled_length = ( model._compute_sub_sample_lengths_from_attention_mask(inputs_dict["attention_mask"]).max().item() ) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, sub_sampled_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) @require_torch class SeamlessM4Tv2ModelWithTextInputTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): is_encoder_decoder = True fx_compatible = False test_missing_keys = False test_pruning = False test_model_parallel = False test_resize_embeddings = True test_headmasking = False test_torchscript = False all_model_classes = ( ( SeamlessM4Tv2Model, SeamlessM4Tv2ForTextToSpeech, SeamlessM4Tv2ForTextToText, ) if is_torch_available() else () ) all_generative_model_classes = (SeamlessM4Tv2ForTextToText,) if is_torch_available() else () def setUp(self): self.model_tester = SeamlessM4Tv2ModelTester(self, input_modality="text") self.config_tester = ConfigTester(self, config_class=SeamlessM4Tv2Config) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "facebook/seamless-m4t-v2-large" model = SeamlessM4Tv2Model.from_pretrained(model_name) self.assertIsNotNone(model) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "pos_bias_v", "pos_bias_u", "pointwise_conv1", "pointwise_conv2", "feature_projection.projection.weight", "feature_projection.projection.bias", "objective.weight", "adapter", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly 
initialized", ) @unittest.skip( reason="Expected missing keys serve when using SeamlessM4Tv2ForXXX.from_pretrained from a checkpoint saved by SeamlessM4Tv2Model.save_pretrained." ) def test_model_weights_reload_no_missing_tied_weights(self): pass @unittest.skip(reason="SeamlessM4Tv2Model can take input_ids or input_features") def test_forward_signature(self): pass def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) @unittest.skip( reason="SeamlessM4Tv2Model is base class but has actually a bigger architecture than seamlessM4T task-specific models." ) def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="SeamlessM4Tv2 has no base model") def test_save_load_fast_init_from_base(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip( reason="This architecure has tied weights by default and there is no way to remove it, check: https://github.com/huggingface/transformers/pull/31771#issuecomment-2210915245" ) def test_load_save_without_tied_weights(self): pass @require_torch class SeamlessM4Tv2GenerationTest(unittest.TestCase): # test that non-standard generation works # test generation of: SeamlessM4Tv2Model, SeamlessM4Tv2ForSpeechToSpeech, SeamlessM4Tv2ForSpeechToText, SeamlessM4Tv2ForTextToSpeech def setUp(self): self.speech_model_tester = SeamlessM4Tv2ModelTester(self, input_modality="speech") self.text_model_tester = SeamlessM4Tv2ModelTester(self, input_modality="text") self.tmpdirname = tempfile.mkdtemp() def update_generation(self, model): text_lang_code_to_id = { "fra": 4, "eng": 4, "rus": 4, } speech_lang_code_to_id = { "fra": 4, "eng": 4, } id_to_text = {str(i): "a" for i in range(model.config.vocab_size)} id_to_text["0"] = "ab" id_to_text["1"] = "_b" id_to_text["3"] = "," id_to_text["4"] = "_cd" char_to_id = {char: i for (i, char) in enumerate("abcd")} generation_config = copy.deepcopy(model.generation_config) generation_config.__setattr__("text_decoder_lang_to_code_id", text_lang_code_to_id) generation_config.__setattr__("t2u_lang_code_to_id", speech_lang_code_to_id) generation_config.__setattr__("vocoder_lang_code_to_id", speech_lang_code_to_id) generation_config.__setattr__("id_to_text", id_to_text) generation_config.__setattr__("char_to_id", char_to_id) generation_config.__setattr__("eos_token_id", 0) generation_config._from_model_config = False model.generation_config = generation_config def prepare_text_input(self, tgt_lang): config, inputs, decoder_input_ids, input_mask, lm_labels = self.text_model_tester.prepare_config_and_inputs() input_dict = { "input_ids": inputs, "attention_mask": input_mask, "tgt_lang": tgt_lang, "num_beams": 2, "do_sample": True, } return config, input_dict def prepare_speech_input(self): config, inputs, decoder_input_ids, input_mask, 
lm_labels = self.speech_model_tester.prepare_config_and_inputs() input_dict = { "input_features": inputs, "attention_mask": input_mask, "tgt_lang": "fra", "num_beams": 2, "do_sample": True, } return config, input_dict def prepare_speech_and_text_input(self): config, inputs, decoder_input_ids, input_mask, lm_labels = self.speech_model_tester.prepare_config_and_inputs() input_speech = { "input_features": inputs, "attention_mask": input_mask, "tgt_lang": "fra", "num_beams": 2, "do_sample": True, } config, inputs, decoder_input_ids, input_mask, lm_labels = self.text_model_tester.prepare_config_and_inputs() input_text = { "input_ids": inputs, "attention_mask": input_mask, "tgt_lang": "eng", "num_beams": 2, "do_sample": True, } return config, input_speech, input_text def factory_generation_speech_test(self, model, inputs): set_seed(0) output = model.generate(**inputs) return output def test_generation_languages(self): config, input_text_rus = self.prepare_text_input(tgt_lang="rus") model = SeamlessM4Tv2Model(config=config) self.update_generation(model) model.to(torch_device) model.eval() # make sure that generating speech, with a language that is only supported for text translation, raises an error with self.assertRaises(ValueError): model.generate(**input_text_rus) # make sure that generating text only works model.generate(**input_text_rus, generate_speech=False) # make sure it works for languages supported by both output modalities config, input_text_eng = self.prepare_text_input(tgt_lang="eng") model.generate(**input_text_eng) model.generate(**input_text_eng, generate_speech=False) def test_speech_generation(self): config, input_speech, input_text = self.prepare_speech_and_text_input() model = SeamlessM4Tv2Model(config=config) self.update_generation(model) model.save_pretrained(self.tmpdirname) model.to(torch_device) model.eval() output_original_text = self.factory_generation_speech_test(model, input_text) output_original_speech = self.factory_generation_speech_test(model, input_speech) state_dict = model.state_dict() text_model = SeamlessM4Tv2ForTextToSpeech.from_pretrained(self.tmpdirname) self.update_generation(text_model) text_model.to(torch_device) text_model.eval() output_text = self.factory_generation_speech_test(text_model, input_text) speech_model = SeamlessM4Tv2ForSpeechToSpeech.from_pretrained(self.tmpdirname) self.update_generation(speech_model) speech_model.to(torch_device) speech_model.eval() for name, tensor in speech_model.state_dict().items(): right_tensor = state_dict.get(name) self.assertEqual(tensor.tolist(), right_tensor.tolist(), f"Tensor {name}") output_speech = self.factory_generation_speech_test(speech_model, input_speech) # test same text output from input text self.assertListEqual(output_original_text[0].ravel().tolist(), output_text[0].ravel().tolist()) self.assertListEqual(output_original_text[1].ravel().tolist(), output_text[1].ravel().tolist()) # test same speech output from input speech # assertTrue because the super long list makes this hang in case of failure self.assertTrue( output_original_speech[0].ravel().tolist() == output_speech[0].ravel().tolist(), "Speech generated was different", ) self.assertTrue( output_original_speech[1].ravel().tolist() == output_speech[1].ravel().tolist(), "Speech generated was different", ) def test_text_generation(self): config, input_speech, input_text = self.prepare_speech_and_text_input() # set generate_speech=False so that text (not speech) is returned input_speech["generate_speech"] = False input_text["generate_speech"] = False model = SeamlessM4Tv2Model(config=config)
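# the base model is saved below and then reloaded into the task-specific text and speech heads; the state-dict and output comparisons check that those heads reproduce the base model's generations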
self.update_generation(model) model.save_pretrained(self.tmpdirname) model.to(torch_device) model.eval() output_original_text = self.factory_generation_speech_test(model, input_text) output_original_speech = self.factory_generation_speech_test(model, input_speech) # the task-specific models don't need it input_speech.pop("generate_speech") input_text.pop("generate_speech") state_dict = model.state_dict() text_model = SeamlessM4Tv2ForTextToText.from_pretrained(self.tmpdirname) self.update_generation(text_model) text_model.to(torch_device) text_model.eval() for name, tensor in text_model.state_dict().items(): right_tensor = state_dict.get(name) self.assertEqual(tensor.tolist(), right_tensor.tolist()) output_text = self.factory_generation_speech_test(text_model, input_text) speech_model = SeamlessM4Tv2ForSpeechToText.from_pretrained(self.tmpdirname) for name, tensor in speech_model.state_dict().items(): right_tensor = state_dict.get(name) self.assertEqual(tensor.tolist(), right_tensor.tolist(), f"Tensor {name}") self.update_generation(speech_model) speech_model.to(torch_device) speech_model.eval() output_speech = self.factory_generation_speech_test(speech_model, input_speech) # test same text output from input text self.assertListEqual(output_original_text[0].ravel().tolist(), output_text.ravel().tolist()) # test same text output from input speech self.assertListEqual(output_original_speech[0].ravel().tolist(), output_speech.ravel().tolist()) def test_generation(self): config, input_speech, input_text = self.prepare_speech_and_text_input() input_speech["num_beams"] = 3 input_speech["do_sample"] = True input_speech["temperature"] = 0.5 input_speech["num_return_sequences"] = 3 input_text["num_beams"] = 3 input_text["do_sample"] = True input_text["temperature"] = 0.5 input_text["num_return_sequences"] = 3 for model_class in [SeamlessM4Tv2ForSpeechToSpeech, SeamlessM4Tv2ForSpeechToText, SeamlessM4Tv2Model]: model = model_class(config=config) self.update_generation(model) model.to(torch_device) model.eval() output = model.generate(**input_speech) output = output[0] if isinstance(output, tuple) else output self.assertEqual(output.shape[0], 3 * input_speech["input_features"].shape[0]) for model_class in [SeamlessM4Tv2ForTextToSpeech, SeamlessM4Tv2ForTextToText, SeamlessM4Tv2Model]: model = model_class(config=config) self.update_generation(model) model.to(torch_device) model.eval() output = model.generate(**input_text) output = output[0] if isinstance(output, tuple) else output self.assertEqual(output.shape[0], 3 * input_text["input_ids"].shape[0]) @require_torch class SeamlessM4Tv2ModelIntegrationTest(unittest.TestCase): repo_id = "facebook/seamless-m4t-v2-large" def assertListAlmostEqual(self, list1, list2, tol=1e-4): self.assertEqual(len(list1), len(list2)) for a, b in zip(list1, list2): self.assertAlmostEqual(a, b, delta=tol) @cached_property def processor(self): return SeamlessM4TProcessor.from_pretrained(self.repo_id) @cached_property def input_text(self): # corresponds to "C'est un test."
with seamlessM4T_medium checkpoint input_ids = torch.tensor([[256026, 109, 247729, 171, 128, 6816, 247676, 3]]) # fmt: skip input_ids = input_ids.to(torch_device) attention_mask = torch.ones_like(input_ids).to(torch_device) inputs = { "attention_mask": attention_mask, "input_ids": input_ids, } return inputs @cached_property def input_audio(self): set_seed(0) seq_len = 20000 sampling_rate = 16000 input_features = torch.rand((2, seq_len)) return self.processor(audios=[input_features.tolist()], sampling_rate=sampling_rate, return_tensors="pt").to( torch_device ) def factory_test_task(self, class1, class2, inputs, class1_kwargs, class2_kwargs): # half-precision loading to limit GPU usage model1 = class1.from_pretrained(self.repo_id, torch_dtype=torch.float16).to(torch_device) model2 = class2.from_pretrained(self.repo_id, torch_dtype=torch.float16).to(torch_device) set_seed(0) output_1 = model1.generate(**inputs, **class1_kwargs) set_seed(0) output_2 = model2.generate(**inputs, **class2_kwargs) for key in output_1: if isinstance(output_1[key], torch.Tensor): if len(output_1[key].shape) == 0: self.assertEqual(output_1[key].item(), output_2[key].item()) else: self.assertListAlmostEqual(output_1[key].squeeze().tolist(), output_2[key].squeeze().tolist()) @slow def test_to_eng_text(self): model = SeamlessM4Tv2Model.from_pretrained(self.repo_id).to(torch_device) # test text - tgt lang: eng expected_text_tokens = [3, 256022, 3080, 1, 247669, 10, 6816, 247676, 3] # fmt: skip # fmt: off expected_unit_tokens = [ 4746,7163,8208,8208,1315,1266,4307,1119,989,9594,3007,3007,4341,5205,7631,7631,3202,4061,9092,3191,7509,1715, 5280,5280,3554,8812,8197,6366,5382,5382,7330,2758,9433,9433,6863,7510,5800,5800,5286,1948,1825,1825,3956,8724, 8724,5331,8914,9315,9315,5288,2588,8167,8787,8787,8063,6008,2621,2621,2621,5696 ] # fmt: on expected_wav_slice = [9.485097e-04, 8.320558e-04, 7.178137e-04, 9.349979e-04, 1.121628e-03, 1.091766e-03, 1.279693e-03, 1.387754e-03, 1.296396e-03, 1.143557e-03] # fmt: skip set_seed(0) output = model.generate(**self.input_text, num_beams=1, tgt_lang="eng", return_intermediate_token_ids=True) self.assertListEqual(expected_text_tokens, output.sequences.squeeze().tolist()) self.assertListEqual( expected_unit_tokens, (output.unit_sequences - model.config.vocoder_offset).squeeze().tolist() ) self.assertListAlmostEqual(expected_wav_slice, output.waveform.squeeze().tolist()[50:60]) # assert mean and std equality self.assertListAlmostEqual( [-2.349690e-04, 9.920777e-02], [output.waveform.mean().item(), output.waveform.std().item()] ) @slow @unittest.skip(reason="Equivalence is broken since a new update") def test_to_swh_text(self): model = SeamlessM4Tv2Model.from_pretrained(self.repo_id).to(torch_device) # test text - tgt lang: swh expected_text_tokens = [3, 256084, 109, 247729, 171, 10, 6816, 247676, 3] # fmt: skip # fmt: off expected_unit_tokens = [ 5725,7163,7472,7472,6915,3099,3099,9921,2765,6515,6515,1374,1374,1347,8252,9854,9854,5662,2420,6600,2216,4503, 7208,6107,6107,7298,9123,6472,9663,9663,6366,6366,6445,575,3575,2052,2052,5788,5800,5800,5286,5286,1825,1825,3956, 3956,8724,8724,5331,8914,8914,9315,9315,2821,8167,8167,8787,8787,8787,8700,8700,8700,2175,2175,3196,3196,2621,1725, 1725,7507,5696 ] # fmt: on expected_wav_slice = [3.124037e-04, 2.450471e-04, 2.286572e-04, 2.317214e-04, 2.732605e-04, 2.478790e-04, 2.704144e-04, 2.665847e-04, 2.828784e-04, 2.684390e-04] # fmt: skip set_seed(0) output = model.generate(**self.input_text, num_beams=1, tgt_lang="swh", 
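# return_intermediate_token_ids exposes the intermediate text tokens and unit tokens alongside the waveform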
return_intermediate_token_ids=True) self.assertListEqual(expected_text_tokens, output.sequences.squeeze().tolist()) self.assertListEqual( expected_unit_tokens, (output.unit_sequences - model.config.vocoder_offset).squeeze().tolist() ) self.assertListAlmostEqual(expected_wav_slice, output.waveform.squeeze().tolist()[50:60]) # assert mean and std equality self.assertListAlmostEqual( [-2.001826e-04, 8.580012e-02], [output.waveform.mean().item(), output.waveform.std().item()] ) @slow def test_to_rus_speech(self): model = SeamlessM4Tv2Model.from_pretrained(self.repo_id).to(torch_device) # test audio - tgt lang: rus expected_text_tokens = [3, 256074, 107, 248213, 404, 247792, 247789, 3] # fmt: skip # fmt: off expected_unit_tokens = [ 8976,7163,6915,2728,2728,5198,3318,3318,3686,1049,9643,1200,2052,2052,8196,8196,7624,7624,7555,7555,7555,7555, 9717,9717,4869,8167,8167,8167,8053,972,9362,8167,297,297,297,3993,3993,3993,3993,4660,4660,4660,4660,4660,4660, 7962,7962,225,225,8737,4199 ] # fmt: on expected_wav_slice = [1.415287e-03, 1.360976e-03, 1.297727e-03, 1.305321e-03, 1.352087e-03, 1.283812e-03, 1.352623e-03, 1.387384e-03, 1.449627e-03, 1.411701e-03] # fmt: skip set_seed(0) output = model.generate(**self.input_audio, num_beams=1, tgt_lang="rus", return_intermediate_token_ids=True) self.assertListEqual(expected_text_tokens, output.sequences.squeeze().tolist()) self.assertListEqual( expected_unit_tokens, (output.unit_sequences - model.config.vocoder_offset).squeeze().tolist() ) self.assertListAlmostEqual(expected_wav_slice, output.waveform.squeeze().tolist()[50:60]) # assert mean and std equality - higher tolerance for speech self.assertListAlmostEqual( [-2.818016e-04, 7.169888e-02], [output.waveform.mean().item(), output.waveform.std().item()], tol=5e-4 ) @slow def test_text_to_text_model(self): kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True, "generate_speech": False} kwargs2 = { "tgt_lang": "eng", "output_hidden_states": True, "return_dict_in_generate": True, "output_scores": True, } self.factory_test_task(SeamlessM4Tv2Model, SeamlessM4Tv2ForTextToText, self.input_text, kwargs1, kwargs2) @slow def test_speech_to_text_model(self): kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True, "generate_speech": False} kwargs2 = { "tgt_lang": "eng", "output_hidden_states": True, "return_dict_in_generate": True, "output_scores": True, } self.factory_test_task(SeamlessM4Tv2Model, SeamlessM4Tv2ForSpeechToText, self.input_audio, kwargs1, kwargs2) @slow def test_speech_to_speech_model(self): kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True} self.factory_test_task(SeamlessM4Tv2Model, SeamlessM4Tv2ForSpeechToSpeech, self.input_audio, kwargs1, kwargs1) @slow def test_text_to_speech_model(self): kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True} self.factory_test_task(SeamlessM4Tv2Model, SeamlessM4Tv2ForTextToSpeech, self.input_text, kwargs1, kwargs1)
transformers/tests/models/seamless_m4t_v2/test_modeling_seamless_m4t_v2.py/0
{ "file_path": "transformers/tests/models/seamless_m4t_v2/test_modeling_seamless_m4t_v2.py", "repo_id": "transformers", "token_count": 23297 }
433
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import copy import unittest import numpy as np import pandas as pd from transformers import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TapasConfig, TapasTokenizer, is_tf_available, ) from transformers.models.auto import get_values from transformers.testing_utils import require_tensorflow_probability, require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, ) from transformers.models.tapas.modeling_tf_tapas import ( IndexMap, ProductIndexMap, flatten, gather, range_index_map, reduce_max, reduce_mean, reduce_sum, ) class TFTapasModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, max_position_embeddings=512, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], type_sequence_label_size=2, positive_weight=10.0, num_aggregation_labels=4, num_labels=2, aggregation_loss_importance=0.8, use_answer_as_supervision=True, answer_loss_importance=0.001, use_normalized_answer_loss=False, huber_loss_delta=25.0, temperature=1.0, agg_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_agg=False, average_approximation_function="ratio", cell_selection_preference=0.5, answer_loss_cutoff=100, max_num_rows=64, max_num_columns=32, average_logits_per_cell=True, select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=True, reset_position_index_per_cell=True, disable_per_token_loss=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob 
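# the attributes below mirror TapasConfig fields one-to-one and are passed straight through in get_config()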
self.initializer_range = initializer_range self.max_position_embeddings = max_position_embeddings self.type_vocab_sizes = type_vocab_sizes self.type_sequence_label_size = type_sequence_label_size self.positive_weight = positive_weight self.num_aggregation_labels = num_aggregation_labels self.num_labels = num_labels self.aggregation_loss_importance = aggregation_loss_importance self.use_answer_as_supervision = use_answer_as_supervision self.answer_loss_importance = answer_loss_importance self.use_normalized_answer_loss = use_normalized_answer_loss self.huber_loss_delta = huber_loss_delta self.temperature = temperature self.agg_temperature = agg_temperature self.use_gumbel_for_cells = use_gumbel_for_cells self.use_gumbel_for_agg = use_gumbel_for_agg self.average_approximation_function = average_approximation_function self.cell_selection_preference = cell_selection_preference self.answer_loss_cutoff = answer_loss_cutoff self.max_num_rows = max_num_rows self.max_num_columns = max_num_columns self.average_logits_per_cell = average_logits_per_cell self.select_one_column = select_one_column self.allow_empty_column_selection = allow_empty_column_selection self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero self.reset_position_index_per_cell = reset_position_index_per_cell self.disable_per_token_loss = disable_per_token_loss self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = [] for type_vocab_size in self.type_vocab_sizes: token_type_ids.append(ids_tensor(shape=[self.batch_size, self.seq_length], vocab_size=type_vocab_size)) token_type_ids = tf.stack(token_type_ids, axis=2) sequence_labels = None token_labels = None labels = None numeric_values = None numeric_values_scale = None float_answer = None aggregation_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) labels = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) numeric_values = ids_tensor([self.batch_size, self.seq_length], vocab_size=2, dtype=tf.float32) numeric_values_scale = ids_tensor([self.batch_size, self.seq_length], vocab_size=2, dtype=tf.float32) float_answer = ids_tensor([self.batch_size], vocab_size=2, dtype=tf.float32) aggregation_labels = ids_tensor([self.batch_size], self.num_aggregation_labels) config = self.get_config() return ( config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ) def get_config(self): return TapasConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_sizes=self.type_vocab_sizes, initializer_range=self.initializer_range, positive_weight=self.positive_weight, num_aggregation_labels=self.num_aggregation_labels, num_labels=self.num_labels, aggregation_loss_importance=self.aggregation_loss_importance, use_answer_as_supervision=self.use_answer_as_supervision, 
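# answer-loss, cell-selection and table-size settings for the question answering head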
answer_loss_importance=self.answer_loss_importance, use_normalized_answer_loss=self.use_normalized_answer_loss, huber_loss_delta=self.huber_loss_delta, temperature=self.temperature, agg_temperature=self.agg_temperature, use_gumbel_for_cells=self.use_gumbel_for_cells, use_gumbel_for_agg=self.use_gumbel_for_agg, average_approximation_function=self.average_approximation_function, cell_selection_preference=self.cell_selection_preference, answer_loss_cutoff=self.answer_loss_cutoff, max_num_rows=self.max_num_rows, max_num_columns=self.max_num_columns, average_logits_per_cell=self.average_logits_per_cell, select_one_column=self.select_one_column, allow_empty_column_selection=self.allow_empty_column_selection, init_cell_selection_weights_to_zero=self.init_cell_selection_weights_to_zero, reset_position_index_per_cell=self.reset_position_index_per_cell, disable_per_token_loss=self.disable_per_token_loss, ) def create_and_check_model( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): model = TFTapasModel(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) inputs.pop("attention_mask") result = model(inputs) inputs.pop("token_type_ids") result = model(inputs) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): model = TFTapasForMaskedLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "labels": token_labels, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): config.num_labels = self.num_labels model = TFTapasForSequenceClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "labels": sequence_labels, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): # inference: without aggregation head (SQA). Model only returns logits sqa_config = copy.copy(config) sqa_config.num_aggregation_labels = 0 sqa_config.use_answer_as_supervision = False model = TFTapasForQuestionAnswering(config=sqa_config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) # inference: with aggregation head (WTQ, WikiSQL-supervised). 
Model returns logits and aggregation logits model = TFTapasForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels)) # training: can happen in 3 main ways # case 1: conversational (SQA) model = TFTapasForQuestionAnswering(config=sqa_config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "labels": labels, } result = model(inputs) self.parent.assertEqual(result.loss.shape, (1,)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) # case 2: weak supervision for aggregation (WTQ) model = TFTapasForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "labels": labels, "numeric_values": numeric_values, "numeric_values_scale": numeric_values_scale, "float_answer": float_answer, } result = model(inputs) self.parent.assertEqual(result.loss.shape, (1,)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels)) # case 3: strong supervision for aggregation (WikiSQL-supervised) wikisql_config = copy.copy(config) wikisql_config.use_answer_as_supervision = False model = TFTapasForQuestionAnswering(config=wikisql_config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "labels": labels, "aggregation_labels": aggregation_labels, } result = model(inputs) self.parent.assertEqual(result.loss.shape, (1,)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tensorflow_probability @require_tf class TFTapasModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFTapasModel, TFTapasForMaskedLM, TFTapasForSequenceClassification, TFTapasForQuestionAnswering, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFTapasModel, "fill-mask": TFTapasForMaskedLM, "text-classification": TFTapasForSequenceClassification, "zero-shot": TFTapasForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict: inputs_dict = copy.deepcopy(inputs_dict) if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict = { k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1)) if isinstance(v, tf.Tensor) and v.ndim > 0 else v for k, v in inputs_dict.items() } if 
return_labels: if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32) elif model_class in get_values(TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING): inputs_dict["labels"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 ) inputs_dict["aggregation_labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) inputs_dict["numeric_values"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.float32 ) inputs_dict["numeric_values_scale"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.float32 ) inputs_dict["float_answer"] = tf.zeros(self.model_tester.batch_size, dtype=tf.float32) elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in get_values(TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING): inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ *get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), *get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING), *get_values(TF_MODEL_FOR_MASKED_LM_MAPPING), *get_values(TF_MODEL_FOR_PRETRAINING_MAPPING), *get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING), ]: inputs_dict["labels"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 ) return inputs_dict def setUp(self): self.model_tester = TFTapasModelTester(self) self.config_tester = ConfigTester(self, config_class=TapasConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) @unittest.skip(reason="The default test gets NaN losses with the test-generated inputs") def test_dataset_conversion(self): pass @unittest.skip(reason="The default test gets NaN losses with the test-generated inputs") def test_keras_fit(self): pass @unittest.skip(reason="The default test gets NaN losses with the test-generated inputs") def test_loss_computation(self): pass @unittest.skip("tfp is not defined even if installed. FIXME @Arthur in a followup PR!") def test_pt_tf_model_equivalence(self): pass def prepare_tapas_single_inputs_for_inference(): # Here we prepare a single table-question pair to test TAPAS inference on: data = { "Footballer": ["Lionel Messi", "Cristiano Ronaldo"], "Age": ["33", "35"], } queries = "Which footballer is 33 years old?" 
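# a single query is passed as a plain string here; the batch helpers below pass lists of queries instead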
table = pd.DataFrame.from_dict(data) return table, queries def prepare_tapas_batch_inputs_for_inference(): # Here we prepare a batch of 2 table-question pairs to test TAPAS inference on: data = { "Footballer": ["Lionel Messi", "Cristiano Ronaldo"], "Age": ["33", "35"], "Number of goals": ["712", "750"], } queries = ["Which footballer is 33 years old?", "How many goals does Ronaldo have?"] table = pd.DataFrame.from_dict(data) return table, queries def prepare_tapas_batch_inputs_for_training(): # Here we prepare a DIFFERENT batch of 2 table-question pairs to test TAPAS training on: data = { "Footballer": ["Lionel Messi", "Cristiano Ronaldo"], "Age": ["33", "35"], "Number of goals": ["712", "750"], } queries = ["Which footballer is 33 years old?", "What's the total number of goals?"] table = pd.DataFrame.from_dict(data) answer_coordinates = [[(0, 0)], [(0, 2), (1, 2)]] answer_text = [["Lionel Messi"], ["1462"]] float_answer = [float("NaN"), float("1462")] return table, queries, answer_coordinates, answer_text, float_answer @require_tensorflow_probability @require_tf class TFTapasModelIntegrationTest(unittest.TestCase): @cached_property def default_tokenizer(self): return TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq") @slow def test_inference_no_head(self): # ideally we want to test this with the weights of tapas_inter_masklm_base_reset, # but since it's not straightforward to do this with the TF 1 implementation, we test it with # the weights of the WTQ base model (i.e. tapas_wtq_wikisql_sqa_inter_masklm_base_reset) model = TFTapasModel.from_pretrained("google/tapas-base-finetuned-wtq") tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="tf") outputs = model(**inputs) # test the sequence output expected_slice = tf.constant( [ [ [-0.141581565, -0.599805772, 0.747186482], [-0.143664181, -0.602008104, 0.749218345], [-0.15169853, -0.603363097, 0.741370678], ] ] ) tf.debugging.assert_near(outputs.last_hidden_state[:, :3, :3], expected_slice, atol=0.0005) # test the pooled output expected_slice = tf.constant([[0.987518311, -0.970520139, -0.994303405]]) tf.debugging.assert_near(outputs.pooler_output[:, :3], expected_slice, atol=0.0005) @unittest.skip(reason="Model not available yet") def test_inference_masked_lm(self): pass # TapasForQuestionAnswering has 3 possible ways of being fine-tuned: # - conversational set-up (SQA) # - weak supervision for aggregation (WTQ, WikiSQL) # - strong supervision for aggregation (WikiSQL-supervised) # We test all of them: @slow def test_inference_question_answering_head_conversational(self): # note that google/tapas-base-finetuned-sqa should correspond to tapas_sqa_inter_masklm_base_reset model = TFTapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-sqa") tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="tf") outputs = model(**inputs) # test the logits logits = outputs.logits expected_shape = tf.TensorShape([1, 21]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant( [ [ -9997.274, -9997.274, -9997.274, -9997.274, -9997.274, -9997.274, -9997.274, -9997.274, -9997.274, -16.262585, -10004.089, 15.435196, 15.435196, 15.435196, -9990.443, -16.327433, -16.327433, -16.327433, -16.327433, -16.327433, -10004.84, ] ] ) tf.debugging.assert_near(logits, expected_slice, atol=0.015) @slow def 
test_inference_question_answering_head_conversational_absolute_embeddings(self): # note that google/tapas-small-finetuned-sqa should correspond to tapas_sqa_inter_masklm_small_reset # however here we test the version with absolute position embeddings model = TFTapasForQuestionAnswering.from_pretrained("google/tapas-small-finetuned-sqa") tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="tf") outputs = model(**inputs) # test the logits logits = outputs.logits expected_shape = tf.TensorShape([1, 21]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant( [ [ -10000.041, -10000.041, -10000.041, -10000.041, -10000.041, -10000.041, -10000.041, -10000.041, -10000.041, -18.369339, -10014.692, 17.730324, 17.730324, 17.730324, -9984.974, -18.322773, -18.322773, -18.322773, -18.322773, -18.322773, -10007.267, ] ] ) tf.debugging.assert_near(logits, expected_slice, atol=0.01) @slow def test_inference_question_answering_head_weak_supervision(self): # note that google/tapas-base-finetuned-wtq should correspond to tapas_wtq_wikisql_sqa_inter_masklm_base_reset model = TFTapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq") tokenizer = self.default_tokenizer # let's test on a batch table, queries = prepare_tapas_batch_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, padding="longest", return_tensors="tf") outputs = model(**inputs) # test the logits logits = outputs.logits expected_shape = tf.TensorShape([2, 28]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant( [ [-160.375504, -160.375504, -160.375504, -10072.3965, -10070.9414, -10094.9736], [-9861.6123, -9861.6123, -9861.6123, -9861.6123, -9891.01172, 146.600677], ] ) tf.debugging.assert_near(logits[:, -6:], expected_slice, atol=0.4) # test the aggregation logits logits_aggregation = outputs.logits_aggregation expected_shape = tf.TensorShape([2, 4]) tf.debugging.assert_equal(logits_aggregation.shape, expected_shape) expected_tensor = tf.constant( [[18.8545208, -9.76614857, -6.3128891, -2.93525243], [-4.05782509, 40.0351, -5.35329962, 23.3978653]] ) tf.debugging.assert_near(logits_aggregation, expected_tensor, atol=0.001) # test the predicted answer coordinates and aggregation indices EXPECTED_PREDICTED_ANSWER_COORDINATES = [[(0, 0)], [(1, 2)]] EXPECTED_PREDICTED_AGGREGATION_INDICES = [0, 1] predicted_answer_coordinates, predicted_aggregation_indices = tokenizer.convert_logits_to_predictions( inputs, outputs.logits, outputs.logits_aggregation ) tf.debugging.assert_equal(EXPECTED_PREDICTED_ANSWER_COORDINATES, predicted_answer_coordinates) tf.debugging.assert_equal(EXPECTED_PREDICTED_AGGREGATION_INDICES, predicted_aggregation_indices) @slow def test_training_question_answering_head_weak_supervision(self): # note that google/tapas-base-finetuned-wtq should correspond to tapas_wtq_wikisql_sqa_inter_masklm_base_reset model = TFTapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq") tokenizer = self.default_tokenizer # let's test on a batch table, queries, answer_coordinates, answer_text, float_answer = prepare_tapas_batch_inputs_for_training() inputs = tokenizer( table=table, queries=queries, answer_coordinates=answer_coordinates, answer_text=answer_text, padding="longest", return_tensors="tf", ) # the answer should be prepared by the user float_answer = tf.constant(float_answer, dtype=tf.float32) outputs = model( 
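# weakly supervised WTQ-style training: token-level labels plus numeric value information and a float answer stand in for explicit aggregation labels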
input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], token_type_ids=inputs["token_type_ids"], labels=inputs["labels"], numeric_values=inputs["numeric_values"], numeric_values_scale=inputs["numeric_values_scale"], float_answer=float_answer, ) # test the loss loss = outputs.loss expected_loss = tf.constant(3.3527612686157227e-08) tf.debugging.assert_near(loss, expected_loss, atol=1e-6) # test the logits on the first example logits = outputs.logits expected_shape = tf.TensorShape([2, 29]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant( [ -160.0156, -160.0156, -160.0156, -160.0156, -160.0156, -10072.2266, -10070.8896, -10092.6006, -10092.6006, ] ) tf.debugging.assert_near(logits[0, -9:], expected_slice, atol=1e-6) # test the aggregation logits on the second example logits_aggregation = outputs.logits_aggregation expected_shape = tf.TensorShape([2, 4]) tf.debugging.assert_equal(logits_aggregation.shape, expected_shape) expected_tensor = tf.constant([-4.0538, 40.0304, -5.3554, 23.3965]) tf.debugging.assert_near(logits_aggregation[1, -4:], expected_tensor, atol=1e-4) @slow def test_inference_question_answering_head_strong_supervision(self): # note that google/tapas-base-finetuned-wikisql-supervised should correspond to tapas_wikisql_sqa_inter_masklm_base_reset model = TFTapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wikisql-supervised") tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="tf") outputs = model(**inputs) # test the logits logits = outputs.logits expected_shape = tf.TensorShape([1, 21]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant( [ [ -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -18.6185989, -10008.7969, 17.6355762, 17.6355762, 17.6355762, -10002.4404, -18.7111301, -18.7111301, -18.7111301, -18.7111301, -18.7111301, -10007.0977, ] ] ) tf.debugging.assert_near(logits, expected_slice, atol=0.02) # test the aggregation logits logits_aggregation = outputs.logits_aggregation expected_shape = tf.TensorShape([1, 4]) tf.debugging.assert_equal(logits_aggregation.shape, expected_shape) expected_tensor = tf.constant([[16.5659733, -3.06624889, -2.34152961, -0.970244825]]) tf.debugging.assert_near(logits_aggregation, expected_tensor, atol=0.003) @slow def test_inference_classification_head(self): # note that google/tapas-base-finetuned-tabfact should correspond to tapas_tabfact_inter_masklm_base_reset model = TFTapasForSequenceClassification.from_pretrained("google/tapas-base-finetuned-tabfact") tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="tf") outputs = model(**inputs) # test the classification logits logits = outputs.logits expected_shape = tf.TensorShape([1, 2]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant([[0.795137286, 9.5572]]) tf.debugging.assert_near(logits, expected_slice, atol=0.05) # Below: tests for Tapas utilities which are defined in modeling_tf_tapas.py. # These are based on segmented_tensor_test.py of the original implementation. 
# URL: https://github.com/google-research/tapas/blob/master/tapas/models/segmented_tensor_test.py @require_tensorflow_probability class TFTapasUtilsTest(unittest.TestCase): def _prepare_tables(self): """Prepares two tables, both with three distinct rows. The first table has two columns: 1.0, 2.0 | 3.0 2.0, 0.0 | 1.0 1.0, 3.0 | 4.0 The second table has three columns: 1.0 | 2.0 | 3.0 2.0 | 0.0 | 1.0 1.0 | 3.0 | 4.0 Returns: SegmentedTensors with the tables. """ values = tf.constant( [ [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]], [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]], ] ) row_index = IndexMap( indices=[ [[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 0, 0], [1, 1, 1], [2, 2, 2]], ], num_segments=3, batch_dims=1, ) col_index = IndexMap( indices=[ [[0, 0, 1], [0, 0, 1], [0, 0, 1]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]], ], num_segments=3, batch_dims=1, ) return values, row_index, col_index def test_product_index(self): _, row_index, col_index = self._prepare_tables() cell_index = ProductIndexMap(row_index, col_index) row_index_proj = cell_index.project_outer(cell_index) col_index_proj = cell_index.project_inner(cell_index) ind = cell_index.indices self.assertEqual(cell_index.num_segments, 9) # Projections should give back the original indices. # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal(row_index.indices.numpy(), row_index_proj.indices.numpy()) self.assertEqual(row_index.num_segments, row_index_proj.num_segments) self.assertEqual(row_index.batch_dims, row_index_proj.batch_dims) # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal(col_index.indices.numpy(), col_index_proj.indices.numpy()) self.assertEqual(col_index.batch_dims, col_index_proj.batch_dims) # The first and second "column" are identified in the first table. for i in range(3): self.assertEqual(ind[0, i, 0], ind[0, i, 1]) self.assertNotEqual(ind[0, i, 0], ind[0, i, 2]) # All rows are distinct in the first table. # iterate over all index pairs; zip(range(3), range(3)) would only yield i == i_2 and make this check vacuous for i in range(3): for i_2 in range(3): for j in range(3): for j_2 in range(3): if i != i_2 and j != j_2: self.assertNotEqual(ind[0, i, j], ind[0, i_2, j_2]) # All cells are distinct in the second table.
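# `or` rather than `and`: no columns are merged in the second table, so changing either the row or the column must yield a different cell id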
for i in range(3): for i_2 in range(3): for j in range(3): for j_2 in range(3): if i != i_2 or j != j_2: self.assertNotEqual(ind[1, i, j], ind[1, i_2, j_2]) def test_flatten(self): _, row_index, col_index = self._prepare_tables() row_index_flat = flatten(row_index) col_index_flat = flatten(col_index) shape = [3, 4, 5] batched_index = IndexMap(indices=tf.zeros(shape, dtype=tf.int32), num_segments=1, batch_dims=3) batched_index_flat = flatten(batched_index) # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal( row_index_flat.indices.numpy(), [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5] ) np.testing.assert_array_equal( col_index_flat.indices.numpy(), [0, 0, 1, 0, 0, 1, 0, 0, 1, 3, 4, 5, 3, 4, 5, 3, 4, 5] ) self.assertEqual(batched_index_flat.num_segments.numpy(), np.prod(shape)) np.testing.assert_array_equal(batched_index_flat.indices.numpy(), range(np.prod(shape))) def test_range_index_map(self): batch_shape = [3, 4] num_segments = 5 index = range_index_map(batch_shape, num_segments) self.assertEqual(num_segments, index.num_segments) self.assertEqual(2, index.batch_dims) indices = index.indices # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal(list(indices.shape), [3, 4, 5]) for i in range(batch_shape[0]): for j in range(batch_shape[1]): # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal(indices[i, j, :].numpy(), range(num_segments)) def test_reduce_sum(self): values, row_index, col_index = self._prepare_tables() cell_index = ProductIndexMap(row_index, col_index) row_sum, _ = reduce_sum(values, row_index) col_sum, _ = reduce_sum(values, col_index) cell_sum, _ = reduce_sum(values, cell_index) # We use np.testing.assert_allclose rather than Tensorflow's assertAllClose np.testing.assert_allclose(row_sum.numpy(), [[6.0, 3.0, 8.0], [6.0, 3.0, 8.0]]) np.testing.assert_allclose(col_sum.numpy(), [[9.0, 8.0, 0.0], [4.0, 5.0, 8.0]]) np.testing.assert_allclose( cell_sum.numpy(), [[3.0, 3.0, 0.0, 2.0, 1.0, 0.0, 4.0, 4.0, 0.0], [1.0, 2.0, 3.0, 2.0, 0.0, 1.0, 1.0, 3.0, 4.0]], ) def test_reduce_mean(self): values, row_index, col_index = self._prepare_tables() cell_index = ProductIndexMap(row_index, col_index) row_mean, _ = reduce_mean(values, row_index) col_mean, _ = reduce_mean(values, col_index) cell_mean, _ = reduce_mean(values, cell_index) # We use np.testing.assert_allclose rather than Tensorflow's assertAllClose np.testing.assert_allclose( row_mean.numpy(), [[6.0 / 3.0, 3.0 / 3.0, 8.0 / 3.0], [6.0 / 3.0, 3.0 / 3.0, 8.0 / 3.0]] ) np.testing.assert_allclose(col_mean.numpy(), [[9.0 / 6.0, 8.0 / 3.0, 0.0], [4.0 / 3.0, 5.0 / 3.0, 8.0 / 3.0]]) np.testing.assert_allclose( cell_mean.numpy(), [ [3.0 / 2.0, 3.0, 0.0, 2.0 / 2.0, 1.0, 0.0, 4.0 / 2.0, 4.0, 0.0], [1.0, 2.0, 3.0, 2.0, 0.0, 1.0, 1.0, 3.0, 4.0], ], ) def test_reduce_max(self): values = tf.convert_to_tensor([2.0, 1.0, 0.0, 3.0]) index = IndexMap(indices=tf.convert_to_tensor([0, 1, 0, 1]), num_segments=2) maximum, _ = reduce_max(values, index) # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal(maximum.numpy(), [2, 3]) def test_reduce_sum_vectorized(self): values = tf.convert_to_tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0], [3.0, 4.0, 5.0]]) index = IndexMap(indices=tf.convert_to_tensor([0, 0, 1]), num_segments=2, batch_dims=0) sums, new_index = reduce_sum(values, index) # We use np.testing.assert_allclose rather than
Tensorflow's assertAllClose np.testing.assert_allclose(sums.numpy(), [[3.0, 5.0, 7.0], [3.0, 4.0, 5.0]]) # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal(new_index.indices.numpy(), [0, 1]) np.testing.assert_array_equal(new_index.num_segments.numpy(), 2) np.testing.assert_array_equal(new_index.batch_dims, 0) def test_gather(self): values, row_index, col_index = self._prepare_tables() cell_index = ProductIndexMap(row_index, col_index) # Compute sums and then gather. The result should have the same shape as # the original table and each element should contain the sum the values in # its cell. sums, _ = reduce_sum(values, cell_index) cell_sum = gather(sums, cell_index) assert cell_sum.shape == values.shape # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_allclose( cell_sum.numpy(), [[[3.0, 3.0, 3.0], [2.0, 2.0, 1.0], [4.0, 4.0, 4.0]], [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]]], ) def test_gather_vectorized(self): values = tf.constant([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) index = IndexMap(indices=tf.convert_to_tensor([[0, 1], [1, 0]]), num_segments=2, batch_dims=1) result = gather(values, index) # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal(result.numpy(), [[[1, 2], [3, 4]], [[7, 8], [5, 6]]])
transformers/tests/models/tapas/test_modeling_tf_tapas.py/0
{ "file_path": "transformers/tests/models/tapas/test_modeling_tf_tapas.py", "repo_id": "transformers", "token_count": 21164 }
434
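Read together, the segmented-tensor tests above exercise one small workflow: build row and column IndexMaps, combine them into a per-cell ProductIndexMap, pool values with reduce_sum, and scatter the pooled values back with gather. A minimal sketch of that flow, assuming the same helpers the test file imports from modeling_tf_tapas; the toy table here is illustrative:

import tensorflow as tf

from transformers.models.tapas.modeling_tf_tapas import IndexMap, ProductIndexMap, gather, reduce_sum

# One batch entry with 2 rows x 3 columns of table values.
values = tf.constant([[[1.0, 2.0, 3.0], [2.0, 0.0, 1.0]]])
row_index = IndexMap(indices=[[[0, 0, 0], [1, 1, 1]]], num_segments=2, batch_dims=1)
col_index = IndexMap(indices=[[[0, 1, 2], [0, 1, 2]]], num_segments=3, batch_dims=1)

# Combine the row and column segmentations into one segment id per table cell.
cell_index = ProductIndexMap(row_index, col_index)

# Pool the values per cell, then scatter each pooled value back to its positions.
cell_sums, _ = reduce_sum(values, cell_index)
per_position_sums = gather(cell_sums, cell_index)  # same shape as `values`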
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class VisionTextDualEncoderProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"] # fmt: skip self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) image_processor_map = { "do_resize": True, "size": {"height": 18, "width": 18}, "do_normalize": True, "image_mean": [0.5, 0.5, 0.5], "image_std": [0.5, 0.5, 0.5], } self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME) with open(self.image_processor_file, "w", encoding="utf-8") as fp: json.dump(image_processor_map, fp) def get_tokenizer(self, **kwargs): return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_image_processor(self, **kwargs): return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. 
""" image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() image_processor = self.get_image_processor() processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor) processor.save_pretrained(self.tmpdirname) processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast)) self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string()) self.assertIsInstance(processor.image_processor, ViTImageProcessor) def test_save_load_pretrained_additional_features(self): processor = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast)) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, ViTImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() input_feat_extract = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"]) # test if it raises when no input is passed with self.assertRaises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) 
self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), processor.model_input_names)
transformers/tests/models/vision_text_dual_encoder/test_processor_vision_text_dual_encoder.py/0
{ "file_path": "transformers/tests/models/vision_text_dual_encoder/test_processor_vision_text_dual_encoder.py", "repo_id": "transformers", "token_count": 2796 }
435
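For reference, the save/load round trip the processor tests above exercise looks like this in user code. This is a sketch only: the tokenizer checkpoint and save path are illustrative, not taken from the tests.

import numpy as np
from PIL import Image

from transformers import BertTokenizer, VisionTextDualEncoderProcessor, ViTImageProcessor

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")  # assumed checkpoint
image_processor = ViTImageProcessor()
processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

processor.save_pretrained("./vtde-processor")  # hypothetical path
reloaded = VisionTextDualEncoderProcessor.from_pretrained("./vtde-processor")

# A joint call tokenizes the text and preprocesses the image in one batch.
image = Image.fromarray(np.random.randint(0, 255, (30, 30, 3), dtype=np.uint8))
batch = reloaded(text="lower newer", images=image)
print(sorted(batch.keys()))  # attention_mask, input_ids, pixel_values, token_type_ids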
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import warnings import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VitMatteImageProcessor class VitMatteImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_rescale=True, rescale_factor=0.5, do_pad=True, size_divisibility=10, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ): super().__init__() self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad self.size_divisibility = size_divisibility self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, "size_divisibility": self.size_divisibility, } def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class VitMatteImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = VitMatteImageProcessor if is_vision_available() else None def setUp(self): super().setUp() self.image_processor_tester = VitMatteImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "rescale_factor")) self.assertTrue(hasattr(image_processing, "do_pad")) self.assertTrue(hasattr(image_processing, "size_divisibility")) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for image 
in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input (image processor does not support batched inputs) image = image_inputs[0] trimap = np.random.randint(0, 3, size=image.shape[:2]) encoded_images = image_processing(images=image, trimaps=trimap, return_tensors="pt").pixel_values # Verify that width and height can be divided by size_divisibility self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisibility == 0) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisibility == 0) def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input (image processor does not support batched inputs) image = image_inputs[0] trimap = np.random.randint(0, 3, size=image.shape[:2]) encoded_images = image_processing(images=image, trimaps=trimap, return_tensors="pt").pixel_values # Verify that width and height can be divided by size_divisibility self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisibility == 0) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisibility == 0) def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input (image processor does not support batched inputs) image = image_inputs[0] trimap = np.random.randint(0, 3, size=image.size[::-1]) encoded_images = image_processing(images=image, trimaps=trimap, return_tensors="pt").pixel_values # Verify that width and height can be divided by size_divisibility self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisibility == 0) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisibility == 0) def test_call_numpy_4_channels(self): # Test that can process images which have an arbitrary number of channels # Initialize image_processing image_processor = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors self.image_processor_tester.num_channels = 4 image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) # Test not batched input (image processor does not support batched inputs) image = image_inputs[0] trimap = np.random.randint(0, 3, size=image.shape[:2]) encoded_images = image_processor( images=image, trimaps=trimap, input_data_format="channels_first", image_mean=0, image_std=1, return_tensors="pt", ).pixel_values # Verify that width and height can be divided by size_divisibility self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisibility == 0) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisibility == 0) def test_padding(self): image_processing = self.image_processing_class(**self.image_processor_dict) image = np.random.randn(3, 249, 491) images = image_processing.pad_image(image) assert images.shape == (3, 256, 512) image = np.random.randn(3, 249, 512) images = image_processing.pad_image(image) assert images.shape == (3, 256, 512) def 
test_image_processor_preprocess_arguments(self):
        # VitMatte requires an additional trimap input for the image processor,
        # which is why we override the original common test.
        for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class(**self.image_processor_dict)
            image = self.image_processor_tester.prepare_image_inputs()[0]
            trimap = np.random.randint(0, 3, size=image.size[::-1])

            with warnings.catch_warnings(record=True) as raised_warnings:
                warnings.simplefilter("always")
                image_processor(image, trimaps=trimap, extra_argument=True)

            messages = " ".join([str(w.message) for w in raised_warnings])
            self.assertGreaterEqual(len(raised_warnings), 1)
            self.assertIn("extra_argument", messages)
transformers/tests/models/vitmatte/test_image_processing_vitmatte.py/0
{ "file_path": "transformers/tests/models/vitmatte/test_image_processing_vitmatte.py", "repo_id": "transformers", "token_count": 3675 }
436
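The padding test above maps (3, 249, 491) to (3, 256, 512): each spatial dimension is rounded up to the next multiple of a pad divisor. A small arithmetic sketch of that rounding rule; the divisor 32 is an assumption inferred from the asserted shapes (249 -> 256, 491 -> 512), not read from the processor code:

import math

def padded_shape(height, width, multiple=32):
    # Round each spatial dim up to the next multiple, e.g. 249 -> 256, 491 -> 512.
    return (
        math.ceil(height / multiple) * multiple,
        math.ceil(width / multiple) * multiple,
    )

assert padded_shape(249, 491) == (256, 512)
assert padded_shape(249, 512) == (256, 512)  # already-divisible dims are unchanged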
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Wav2Vec2-BERT model.""" import tempfile import unittest from datasets import load_dataset from transformers import Wav2Vec2BertConfig, is_torch_available from transformers.testing_utils import ( is_pt_flax_cross_test, require_torch, require_torch_accelerator, require_torch_fp16, slow, torch_device, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( AutoFeatureExtractor, Wav2Vec2BertForAudioFrameClassification, Wav2Vec2BertForCTC, Wav2Vec2BertForSequenceClassification, Wav2Vec2BertForXVector, Wav2Vec2BertModel, ) from transformers.models.wav2vec2_bert.modeling_wav2vec2_bert import ( _compute_mask_indices, _sample_negative_indices, ) # Copied from tests.models.wav2vec2_conformer.test_modeling_wav2vec2_conformer.Wav2Vec2ConformerModelTester with Conformer->Bert, input_values->input_features class Wav2Vec2BertModelTester: # Ignore copy def __init__( self, parent, batch_size=13, seq_length=200, # speech is longer is_training=False, hidden_size=16, feature_projection_input_dim=16, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=2, num_attention_heads=2, hidden_dropout_prob=0.1, intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, mask_time_prob=0.5, mask_time_length=2, vocab_size=32, do_stable_layer_norm=False, num_adapter_layers=2, adapter_stride=2, tdnn_dim=(32, 32), tdnn_kernel=(5, 3), tdnn_dilation=(1, 2), xvector_output_dim=32, position_embeddings_type="relative", scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feature_projection_input_dim = feature_projection_input_dim self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.num_adapter_layers = num_adapter_layers self.adapter_stride = adapter_stride self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.scope = scope self.tdnn_dim = tdnn_dim self.tdnn_kernel = tdnn_kernel self.tdnn_dilation = tdnn_dilation self.xvector_output_dim = xvector_output_dim self.position_embeddings_type = position_embeddings_type self.output_seq_length = self.seq_length self.encoder_seq_length = self.output_seq_length 
self.adapter_output_seq_length = self.output_seq_length for _ in range(num_adapter_layers): self.adapter_output_seq_length = (self.adapter_output_seq_length - 1) // adapter_stride + 1 # Ignore copy def prepare_config_and_inputs(self, position_embeddings_type="relative"): input_shape = [self.batch_size, self.seq_length, self.feature_projection_input_dim] input_features = floats_tensor(input_shape, self.vocab_size) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config(position_embeddings_type=position_embeddings_type) return config, input_features, attention_mask # Ignore copy def get_config(self, position_embeddings_type="relative"): return Wav2Vec2BertConfig( hidden_size=self.hidden_size, feature_projection_input_dim=self.feature_projection_input_dim, mask_time_prob=self.mask_time_prob, mask_time_length=self.mask_time_length, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, do_stable_layer_norm=self.do_stable_layer_norm, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, num_adapter_layers=self.num_adapter_layers, adapter_stride=self.adapter_stride, tdnn_dim=self.tdnn_dim, tdnn_kernel=self.tdnn_kernel, tdnn_dilation=self.tdnn_dilation, xvector_output_dim=self.xvector_output_dim, position_embeddings_type=position_embeddings_type, ) def create_and_check_model(self, config, input_features, attention_mask): model = Wav2Vec2BertModel(config=config) model.to(torch_device) model.eval() result = model(input_features, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_model_with_adapter(self, config, input_features, attention_mask): config.add_adapter = True model = Wav2Vec2BertModel(config=config) model.to(torch_device) model.eval() result = model(input_features, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, self.hidden_size) ) def create_and_check_model_with_adapter_for_ctc(self, config, input_features, attention_mask): config.add_adapter = True config.output_hidden_size = 2 * config.hidden_size model = Wav2Vec2BertForCTC(config=config) model.to(torch_device) model.eval() result = model(input_features, attention_mask=attention_mask) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.adapter_output_seq_length, self.vocab_size) ) # Ignore copy def create_and_check_model_with_intermediate_ffn_before_adapter(self, config, input_features, attention_mask): config.add_adapter = True config.use_intermediate_ffn_before_adapter = True model = Wav2Vec2BertModel(config=config) model.to(torch_device) model.eval() result = model(input_features, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, config.output_hidden_size), ) # also try with different adapter proj dim config.output_hidden_size = 8 model = Wav2Vec2BertModel(config=config) model.to(torch_device) model.eval() result = model(input_features, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, 
self.adapter_output_seq_length, config.output_hidden_size), ) def create_and_check_model_with_adapter_proj_dim(self, config, input_features, attention_mask): config.add_adapter = True config.output_hidden_size = 8 model = Wav2Vec2BertModel(config=config) model.to(torch_device) model.eval() result = model(input_features, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, config.output_hidden_size), ) def create_and_check_model_float16(self, config, input_features, attention_mask): model = Wav2Vec2BertModel(config=config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = Wav2Vec2BertModel.from_pretrained(tmpdirname, torch_dtype=torch.float16) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_features.type(dtype=torch.float16), attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_batch_inference(self, config, input_features, *args): # test does not pass for models making use of `group_norm` # check: https://github.com/pytorch/fairseq/issues/3227 model = Wav2Vec2BertModel(config=config) model.to(torch_device) model.eval() input_features = input_features[:3] attention_mask = torch.ones(input_features.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_features.shape[-1] // i for i in [4, 2, 1]] # pad input for i in range(len(input_lengths)): input_features[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 batch_outputs = model(input_features, attention_mask=attention_mask).last_hidden_state for i in range(input_features.shape[0]): input_slice = input_features[i : i + 1, : input_lengths[i]] output = model(input_slice).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_features, *args): model = Wav2Vec2BertForCTC(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_features = input_features[:3] # Ignore copy attention_mask = torch.ones(input_features.shape[:2], device=torch_device, dtype=torch.long) input_lengths = [input_features.shape[1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_features.shape[0], min(max_length_labels) - 1), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_features[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_features, attention_mask=attention_mask, labels=labels).loss.item() model.config.ctc_loss_reduction = "mean" mean_loss = model(input_features, attention_mask=attention_mask, labels=labels).loss.item() self.parent.assertTrue(isinstance(sum_loss, float)) self.parent.assertTrue(isinstance(mean_loss, float)) def check_seq_classifier_loss(self, config, input_features, *args): model = Wav2Vec2BertForSequenceClassification(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_features = input_features[:3] # Ignore copy attention_mask = torch.ones(input_features.shape[:2], device=torch_device, dtype=torch.long) input_lengths = [input_features.shape[1] // i for i in [4, 2, 1]] labels = ids_tensor((input_features.shape[0], 1), 
len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_features[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 masked_loss = model(input_features, attention_mask=attention_mask, labels=labels).loss.item() unmasked_loss = model(input_features, labels=labels).loss.item() self.parent.assertTrue(isinstance(masked_loss, float)) self.parent.assertTrue(isinstance(unmasked_loss, float)) self.parent.assertTrue(masked_loss != unmasked_loss) def check_ctc_training(self, config, input_features, *args): config.ctc_zero_infinity = True model = Wav2Vec2BertForCTC(config=config) model.to(torch_device) model.train() # Ignore copy input_features = input_features[:3] input_lengths = [input_features.shape[1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_features.shape[0], max(max_length_labels) - 2), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_features[i, input_lengths[i] :] = 0.0 if max_length_labels[i] < labels.shape[-1]: # it's important that we make sure that target lengths are at least # one shorter than logit lengths to prevent -inf labels[i, max_length_labels[i] - 1 :] = -100 loss = model(input_features, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_seq_classifier_training(self, config, input_features, *args): config.ctc_zero_infinity = True model = Wav2Vec2BertForSequenceClassification(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() input_features = input_features[:3] # Ignore copy input_lengths = [input_features.shape[1] // i for i in [4, 2, 1]] labels = ids_tensor((input_features.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_features[i, input_lengths[i] :] = 0.0 loss = model(input_features, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_xvector_training(self, config, input_features, *args): config.ctc_zero_infinity = True model = Wav2Vec2BertForXVector(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() input_features = input_features[:3] input_lengths = [input_features.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_features.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_features[i, input_lengths[i] :] = 0.0 loss = model(input_features, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_labels_out_of_vocab(self, config, input_features, *args): model = Wav2Vec2BertForCTC(config) model.to(torch_device) model.train() input_features = input_features[:3] input_lengths = [input_features.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_features.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100) with self.parent.assertRaises(ValueError): model(input_features, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_features, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_features": input_features, "attention_mask": attention_mask} return config, inputs_dict @require_torch # Copied from 
tests.models.wav2vec2_conformer.test_modeling_wav2vec2_conformer.Wav2Vec2ConformerModelTest with Conformer->Bert, input_values->input_features class Wav2Vec2BertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): # Ignore copy all_model_classes = ( ( Wav2Vec2BertForCTC, Wav2Vec2BertModel, Wav2Vec2BertForSequenceClassification, Wav2Vec2BertForAudioFrameClassification, Wav2Vec2BertForXVector, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "audio-classification": Wav2Vec2BertForSequenceClassification, "automatic-speech-recognition": Wav2Vec2BertForCTC, "feature-extraction": Wav2Vec2BertModel, } if is_torch_available() else {} ) test_pruning = False test_headmasking = False test_torchscript = False def setUp(self): self.model_tester = Wav2Vec2BertModelTester(self) self.config_tester = ConfigTester(self, config_class=Wav2Vec2BertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_relative(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="relative") self.model_tester.create_and_check_model(*config_and_inputs) # Ignore copy def test_model_with_relative_key(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="relative_key") self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_rotary(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="rotary") self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_no_rel_pos(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type=None) self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_adapter(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter(*config_and_inputs) def test_model_with_adapter_for_ctc(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_for_ctc(*config_and_inputs) # Ignore copy def test_model_with_intermediate_ffn_before_adapter(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_intermediate_ffn_before_adapter(*config_and_inputs) def test_model_with_adapter_proj_dim(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_proj_dim(*config_and_inputs) @require_torch_accelerator @require_torch_fp16 def test_model_float16_with_relative(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="relative") self.model_tester.create_and_check_model_float16(*config_and_inputs) # Ignore copy @require_torch_accelerator @require_torch_fp16 def test_model_float16_with_relative_key(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="relative_key") self.model_tester.create_and_check_model_float16(*config_and_inputs) @require_torch_accelerator @require_torch_fp16 def test_model_float16_with_rotary(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="rotary") self.model_tester.create_and_check_model_float16(*config_and_inputs) def test_ctc_loss_inference(self): 
config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_xvector_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_xvector_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) # Ignore copy @unittest.skip(reason="Wav2Vec2Bert has no inputs_embeds") def test_inputs_embeds(self): pass # Ignore copy @unittest.skip(reason="`input_ids` is renamed to `input_features`") def test_forward_signature(self): pass # Ignore copy @unittest.skip(reason="Wav2Vec2Bert has no tokens embeddings") def test_resize_tokens_embeddings(self): pass # Ignore copy @unittest.skip(reason="Wav2Vec2Bert has no inputs_embeds") def test_model_get_set_embeddings(self): pass # Ignore copy @unittest.skip(reason="non-robust architecture does not exist in Flax") @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): pass # Ignore copy @unittest.skip(reason="non-robust architecture does not exist in Flax") @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_features = inputs_dict["input_features"] input_lengths = torch.tensor( [input_features.shape[1] for _ in range(input_features.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_features.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "pos_bias_v", "pos_bias_u", "pointwise_conv1", "pointwise_conv2", "feature_projection.projection.weight", 
"feature_projection.projection.bias", "objective.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "pos_bias_u") and module.pos_bias_u is not None: module.pos_bias_u.data.fill_(3) if hasattr(module, "pos_bias_v") and module.pos_bias_v is not None: module.pos_bias_v.data.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) # Ignore copy @unittest.skip(reason="Kept to make #Copied from working") def test_mask_feature_prob_ctc(self): pass # Ignore copy @unittest.skip(reason="Kept to make #Copied from working") def test_mask_time_prob_ctc(self): pass @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): # Ignore copy model = Wav2Vec2BertModel.from_pretrained("facebook/w2v-bert-2.0") self.assertIsNotNone(model) @require_torch # Copied from tests.models.wav2vec2_conformer.test_modeling_wav2vec2_conformer.Wav2Vec2ConformerUtilsTest with Conformer->Bert, input_values->input_features class Wav2Vec2BertUtilsTest(unittest.TestCase): def test_compute_mask_indices(self): batch_size = 4 sequence_length = 60 mask_prob = 0.5 mask_length = 1 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)]) def test_compute_mask_indices_low_prob(self): # with these settings num_masked_spans=0.5, which means probabilistic rounding # ensures that in 5 out of 10 method calls, num_masked_spans=0, and in # the other 5 out of 10, cases num_masked_spans=1 n_trials = 100 batch_size = 4 sequence_length = 100 mask_prob = 0.05 mask_length = 10 count_dimensions_masked = 0 count_dimensions_not_masked = 0 for _ in range(n_trials): mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) num_masks = torch.sum(mask).item() if num_masks > 0: count_dimensions_masked += 1 else: count_dimensions_not_masked += 1 # as we test for at least 10 masked dimension and at least # 10 non-masked dimension, this test could fail with probability: # P(100 coin flips, at most 9 heads) = 1.66e-18 self.assertGreater(count_dimensions_masked, int(n_trials * 0.1)) self.assertGreater(count_dimensions_not_masked, int(n_trials * 0.1)) def test_compute_mask_indices_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = 
torch.from_numpy(mask).to(torch_device)

        # because of overlap, masks don't have to add up exactly to `mask_prob * sequence_length`,
        # but have to be smaller or equal
        for batch_sum in mask.sum(axis=-1):
            self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)

    def test_compute_mask_indices_attn_mask_overlap(self):
        batch_size = 4
        sequence_length = 80
        mask_prob = 0.5
        mask_length = 4

        attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device)
        attention_mask[:2, sequence_length // 2 :] = 0

        mask = _compute_mask_indices(
            (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask
        )
        mask = torch.from_numpy(mask).to(torch_device)

        for batch_sum in mask.sum(axis=-1):
            self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)

        self.assertTrue(mask[:2, sequence_length // 2 :].sum() == 0)

    def test_compute_mask_indices_short_audio(self):
        batch_size = 4
        sequence_length = 100
        mask_prob = 0.05
        mask_length = 10

        attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device)
        # force one example to be heavily padded
        attention_mask[0, 5:] = 0

        mask = _compute_mask_indices(
            (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask, min_masks=2
        )

        # make sure that the valid (non-padded) part of the heavily padded example is never masked
        self.assertFalse(mask[0][attention_mask[0].to(torch.bool).cpu()].any())

    # Ignore copy
    @unittest.skip(reason="Kept to make #Copied from working. Test a class used for pretraining, not yet supported.")
    def test_compute_perplexity(self):
        pass

    def test_sample_negatives(self):
        batch_size = 2
        sequence_length = 10
        hidden_size = 4
        num_negatives = 3

        features = (torch.arange(sequence_length * hidden_size, device=torch_device) // hidden_size).view(
            sequence_length, hidden_size
        )  # each value in the vector consists of the same value
        features = features[None, :].expand(batch_size, sequence_length, hidden_size).contiguous()

        # sample negative indices
        sampled_negative_indices = _sample_negative_indices((batch_size, sequence_length), num_negatives, None)
        sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device)
        negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)]
        negatives = negatives.view(batch_size, sequence_length, -1, hidden_size).permute(2, 0, 1, 3)
        self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size))

        # make sure no negatively sampled vector is actually a positive one
        for negative in negatives:
            self.assertTrue(((negative - features) == 0).sum() == 0.0)

        # make sure that full vectors are sampled and not just slices of vectors
        # => this means that `unique()` yields a single value for `hidden_size` dim
        self.assertTrue(negatives.unique(dim=-1).shape, (num_negatives, batch_size, sequence_length, 1))

    def test_sample_negatives_with_mask(self):
        batch_size = 2
        sequence_length = 10
        hidden_size = 4
        num_negatives = 3

        # second half of last input tensor is padded
        mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device)
        mask[-1, sequence_length // 2 :] = 0

        features = (torch.arange(sequence_length * hidden_size, device=torch_device) // hidden_size).view(
            sequence_length, hidden_size
        )  # each value in the vector consists of the same value
        features = features[None, :].expand(batch_size, sequence_length, hidden_size).contiguous()

        # replace masked feature vectors with -100 to test that those are not sampled
        features = torch.where(mask[:, :, None].expand(features.shape).bool(), features, -100)

        # sample negative indices
sampled_negative_indices = _sample_negative_indices( (batch_size, sequence_length), num_negatives, mask.cpu().numpy() ) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)] negatives = negatives.view(batch_size, sequence_length, -1, hidden_size).permute(2, 0, 1, 3) self.assertTrue((negatives >= 0).all().item()) self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size)) # make sure no negatively sampled vector is actually a positive one for negative in negatives: self.assertTrue(((negative - features) == 0).sum() == 0.0) # make sure that full vectors are sampled and not values of vectors => this means that `unique()` yields a single value for `hidden_size` dim self.assertTrue(negatives.unique(dim=-1).shape, (num_negatives, batch_size, sequence_length, 1)) @require_torch @slow class Wav2Vec2BertModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").filter(lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)]) speech_samples = speech_samples[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_inference_w2v2_bert(self): model = Wav2Vec2BertModel.from_pretrained("facebook/w2v-bert-2.0") model.to(torch_device) feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/w2v-bert-2.0") input_speech = self._load_datasamples(2) inputs = feature_extractor(input_speech, return_tensors="pt", padding=True).to(torch_device) model.eval() with torch.no_grad(): outputs = model(**inputs, output_attentions=True) # fmt: off expected_slice_0 = torch.tensor( [[-0.0098, -0.0570, -0.1286, 0.0439, -0.1037, -0.0235], [-0.0767, 0.0574, -0.3224, 0.0482, 0.0440, -0.0193], [ 0.0220, -0.0878, -0.2027, -0.0028, -0.0666, 0.0721], [ 0.0307, -0.1099, 0.0273, -0.0416, -0.0715, 0.0094], [ 0.0758, -0.0291, 0.1084, 0.0004, -0.0751, -0.0116], [ 0.0349, -0.0343, -0.0098, 0.0415, -0.0617, 0.0241], [-0.0193, -0.0171, 0.1965, 0.0797, -0.0308, 0.2033], [-0.0323, -0.0315, 0.0948, 0.0944, -0.0254, 0.1241], [-0.0493, 0.0010, -0.1762, 0.0034, -0.0787, 0.0832], [ 0.0043, -0.1228, -0.0739, 0.0266, -0.0337, -0.0068]] ).to(torch_device) # fmt: on # fmt: off expected_slice_1 = torch.tensor( [[-0.0348, -0.0521, -0.3036, 0.0285, -0.0715, -0.0453], [-0.0102, 0.0114, -0.3266, 0.0027, -0.0558, 0.0038], [ 0.0454, 0.0148, -0.2418, -0.0392, -0.0455, 0.0478], [-0.0013, 0.0825, -0.1730, -0.0091, -0.0426, 0.0360], [-0.0227, 0.0687, -0.1168, 0.0569, -0.0160, 0.0759], [-0.0318, 0.0562, -0.0508, 0.0605, 0.0150, 0.0953], [-0.0415, 0.0438, 0.0233, 0.0336, 0.0262, 0.0860], [-0.0163, 0.0048, 0.0807, 0.0119, 0.0712, 0.0158], [ 0.0244, -0.0145, 0.0262, -0.0237, 0.0283, -0.0125], [-0.0587, -0.0516, -0.0368, -0.0196, 0.0307, -0.1434]] ).to(torch_device) # fmt: on self.assertTrue((outputs.last_hidden_state[0, 25:35, 4:10] - expected_slice_0).abs().max() <= 1e-4) self.assertTrue((outputs.last_hidden_state[1, 25:35, 4:10] - expected_slice_1).abs().max() <= 1e-4) self.assertAlmostEqual(outputs.last_hidden_state[1].mean().item(), 3.3123e-05) self.assertAlmostEqual(outputs.last_hidden_state[1].std().item(), 0.1545, delta=2e-5) self.assertListEqual(list(outputs.last_hidden_state.shape), [2, 326, 1024])
transformers/tests/models/wav2vec2_bert/test_modeling_wav2vec2_bert.py/0
{ "file_path": "transformers/tests/models/wav2vec2_bert/test_modeling_wav2vec2_bert.py", "repo_id": "transformers", "token_count": 17014 }
437
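The mask-utility tests above lean on one invariant: with mask_length == 1, masked spans cannot overlap, so every row masks exactly mask_prob * sequence_length frames. A sketch using the same private helper the test file imports (it is private API, so the signature may change between versions):

import torch

from transformers.models.wav2vec2_bert.modeling_wav2vec2_bert import _compute_mask_indices

batch_size, sequence_length = 4, 60
mask = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.5, mask_length=1)
mask = torch.from_numpy(mask)

# No span overlap when mask_length == 1, so each row masks 0.5 * 60 = 30 positions.
assert mask.sum(axis=-1).tolist() == [30, 30, 30, 30]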
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch ZoeDepth model.""" import unittest from transformers import Dinov2Config, ZoeDepthConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ZoeDepthForDepthEstimation if is_vision_available(): from PIL import Image from transformers import ZoeDepthImageProcessor class ZoeDepthModelTester: def __init__( self, parent, batch_size=2, num_channels=3, image_size=32, patch_size=16, use_labels=True, num_labels=3, is_training=True, hidden_size=4, num_hidden_layers=2, num_attention_heads=2, intermediate_size=8, out_features=["stage1", "stage2"], apply_layernorm=False, reshape_hidden_states=False, neck_hidden_sizes=[2, 2], fusion_hidden_size=6, bottleneck_features=6, num_out_features=[6, 6, 6, 6], ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.out_features = out_features self.apply_layernorm = apply_layernorm self.reshape_hidden_states = reshape_hidden_states self.use_labels = use_labels self.num_labels = num_labels self.is_training = is_training self.neck_hidden_sizes = neck_hidden_sizes self.fusion_hidden_size = fusion_hidden_size self.bottleneck_features = bottleneck_features self.num_out_features = num_out_features # ZoeDepth's sequence length self.seq_length = (self.image_size // self.patch_size) ** 2 + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return ZoeDepthConfig( backbone_config=self.get_backbone_config(), backbone=None, neck_hidden_sizes=self.neck_hidden_sizes, fusion_hidden_size=self.fusion_hidden_size, bottleneck_features=self.bottleneck_features, num_out_features=self.num_out_features, ) def get_backbone_config(self): return Dinov2Config( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, is_training=self.is_training, out_features=self.out_features, reshape_hidden_states=self.reshape_hidden_states, ) def 
create_and_check_for_depth_estimation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = ZoeDepthForDepthEstimation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class ZoeDepthModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as ZoeDepth does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (ZoeDepthForDepthEstimation,) if is_torch_available() else () pipeline_model_mapping = {"depth-estimation": ZoeDepthForDepthEstimation} if is_torch_available() else {} test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = ZoeDepthModelTester(self) self.config_tester = ConfigTester( self, config_class=ZoeDepthConfig, has_text_modality=False, hidden_size=37, common_properties=[] ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="ZoeDepth with AutoBackbone does not have a base model and hence no input_embeddings") def test_inputs_embeds(self): pass @unittest.skip(reason="ZoeDepth with AutoBackbone does not have a base model and hence no input_embeddings") def test_model_get_set_embeddings(self): pass def test_for_depth_estimation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs) @unittest.skip(reason="ZoeDepth with AutoBackbone does not have a base model and hence no input_embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="ZoeDepth with AutoBackbone does not have a base model") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="ZoeDepth with AutoBackbone does not have a base model") def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="ZoeDepth does not support training yet") def test_training(self): pass @unittest.skip(reason="ZoeDepth does not support training yet") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="ZoeDepth does not support training yet") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="ZoeDepth does not support training yet") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @slow def test_model_from_pretrained(self): model_name = "Intel/zoedepth-nyu" model = ZoeDepthForDepthEstimation.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision @slow class ZoeDepthModelIntegrationTest(unittest.TestCase): def test_inference_depth_estimation(self): image_processor = ZoeDepthImageProcessor.from_pretrained("Intel/zoedepth-nyu") model = ZoeDepthForDepthEstimation.from_pretrained("Intel/zoedepth-nyu").to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) predicted_depth = outputs.predicted_depth # verify the 
predicted depth expected_shape = torch.Size((1, 384, 512)) self.assertEqual(predicted_depth.shape, expected_shape) expected_slice = torch.tensor( [[1.0020, 1.0219, 1.0389], [1.0349, 1.0816, 1.1000], [1.0576, 1.1094, 1.1249]], ).to(torch_device) self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4)) def test_inference_depth_estimation_multiple_heads(self): image_processor = ZoeDepthImageProcessor.from_pretrained("Intel/zoedepth-nyu-kitti") model = ZoeDepthForDepthEstimation.from_pretrained("Intel/zoedepth-nyu-kitti").to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) predicted_depth = outputs.predicted_depth # verify the predicted depth expected_shape = torch.Size((1, 384, 512)) self.assertEqual(predicted_depth.shape, expected_shape) expected_slice = torch.tensor( [[1.1571, 1.1438, 1.1783], [1.2163, 1.2036, 1.2320], [1.2688, 1.2461, 1.2734]], ).to(torch_device) self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4))
transformers/tests/models/zoedepth/test_modeling_zoedepth.py/0
{ "file_path": "transformers/tests/models/zoedepth/test_modeling_zoedepth.py", "repo_id": "transformers", "token_count": 3883 }
438
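A minimal inference sketch mirroring the ZoeDepth integration test above; the checkpoint name comes from the test, while the image path is illustrative:

import torch
from PIL import Image

from transformers import ZoeDepthForDepthEstimation, ZoeDepthImageProcessor

image_processor = ZoeDepthImageProcessor.from_pretrained("Intel/zoedepth-nyu")
model = ZoeDepthForDepthEstimation.from_pretrained("Intel/zoedepth-nyu")

image = Image.open("cats.png")  # any RGB image; path is hypothetical
inputs = image_processor(images=image, return_tensors="pt")

with torch.no_grad():
    # predicted_depth has shape (batch, height, width), e.g. (1, 384, 512) in the test.
    predicted_depth = model(**inputs).predicted_depth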
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_IMAGE_TO_IMAGE_MAPPING, AutoImageProcessor, AutoModelForImageToImage, ImageToImagePipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_torch @require_vision class ImageToImagePipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_IMAGE_TO_IMAGE_MAPPING examples = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", ] @require_torch @require_vision @slow def test_pipeline(self, torch_dtype="float32"): model_id = "caidas/swin2SR-classical-sr-x2-64" upscaler = pipeline("image-to-image", model=model_id, torch_dtype=torch_dtype) upscaled_list = upscaler(self.examples) self.assertEqual(len(upscaled_list), len(self.examples)) for output in upscaled_list: self.assertIsInstance(output, Image.Image) self.assertEqual(upscaled_list[0].size, (1296, 976)) self.assertEqual(upscaled_list[1].size, (1296, 976)) @require_torch @require_vision @slow def test_pipeline_fp16(self): self.test_pipeline(torch_dtype="float16") @require_torch @require_vision @slow def test_pipeline_model_processor(self): model_id = "caidas/swin2SR-classical-sr-x2-64" model = AutoModelForImageToImage.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) upscaler = ImageToImagePipeline(model=model, image_processor=image_processor) upscaled_list = upscaler(self.examples) self.assertEqual(len(upscaled_list), len(self.examples)) for output in upscaled_list: self.assertIsInstance(output, Image.Image) self.assertEqual(upscaled_list[0].size, (1296, 976)) self.assertEqual(upscaled_list[1].size, (1296, 976))
transformers/tests/pipelines/test_pipelines_image_to_image.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_image_to_image.py", "repo_id": "transformers", "token_count": 1141 }
439
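The tests above pin down the happy path of the image-to-image pipeline; a minimal usage sketch follows. The model id and the doubled output size are taken from the tests; the output filename is an assumption for illustration.

# Minimal sketch of the image-to-image (super-resolution) pipeline covered above.
from transformers import pipeline

upscaler = pipeline("image-to-image", model="caidas/swin2SR-classical-sr-x2-64")
# For a single input the pipeline returns a single PIL image; for a list, a list of images.
upscaled = upscaler("http://images.cocodataset.org/val2017/000000039769.jpg")
# Swin2SR x2 roughly doubles each dimension; the tests assert (1296, 976) for this image.
upscaled.save("upscaled.png")  # hypothetical output path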
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    # Deactivating auto tests since we don't have a good MODEL_FOR_XX mapping,
    # and only CLAP would be there for now.
    # model_mapping = {CLAPConfig: CLAPModel}

    @require_torch
    def test_small_model_pt(self, torch_dtype="float32"):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="hf-internal-testing/tiny-clap-htsat-unfused",
            torch_dtype=torch_dtype,
        )
        dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vacuum cleaner"}],
        )

    @require_torch
    def test_small_model_pt_fp16(self):
        self.test_small_model_pt(torch_dtype="float16")

    @unittest.skip(reason="No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
        audio = dataset["train"]["audio"][-1]["array"]

        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 1.0, "label": "Sound of a dog"},
                {"score": 0.0, "label": "Sound of vacuum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 1.0, "label": "Sound of a dog"},
                    {"score": 0.0, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 1.0, "label": "Sound of a dog"},
                    {"score": 0.0, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip(reason="No models are available in TF")
    def test_large_model_tf(self):
        pass
transformers/tests/pipelines/test_pipelines_zero_shot_audio_classification.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_zero_shot_audio_classification.py", "repo_id": "transformers", "token_count": 1610 }
440
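For orientation, here is a minimal sketch of the zero-shot audio classification flow these tests cover. The model, dataset id, and candidate labels come from the test; any 1-D float waveform at the expected sampling rate would work in place of the dataset sample.

# Minimal sketch of zero-shot audio classification with CLAP, as exercised above.
from datasets import load_dataset
from transformers import pipeline

classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")

dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
audio = dataset["train"]["audio"][-1]["array"]

# Scores are a softmax over the candidate labels, so they sum to 1.
print(classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"]))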
# coding=utf-8 # Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from transformers import AddedToken, AutoModelForCausalLM, AutoTokenizer from transformers.testing_utils import require_gguf, require_torch_gpu, slow, torch_device from transformers.utils import is_torch_available if is_torch_available(): import torch @require_gguf @require_torch_gpu @slow class GgufIntegrationTests(unittest.TestCase): original_model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0" model_id = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF" mistral_model_id = "TheBloke/Mistral-7B-Instruct-v0.2-GGUF" qwen2_model_id = "Qwen/Qwen1.5-0.5B-Chat-GGUF" llama3_model_id = "NousResearch/Meta-Llama-3-8B-GGUF" tinyllama_model_id = "PenutChen/TinyLlama-1.1B-Chat-v1.0-GGUF" q4_0_gguf_model_id = "tinyllama-1.1b-chat-v1.0.Q4_0.gguf" q4_k_gguf_model_id = "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf" q2_k_gguf_model_id = "tinyllama-1.1b-chat-v1.0.Q2_K.gguf" q3_k_gguf_model_id = "tinyllama-1.1b-chat-v1.0.Q3_K_L.gguf" q5_k_gguf_model_id = "tinyllama-1.1b-chat-v1.0.Q5_K_M.gguf" q6_k_gguf_model_id = "tinyllama-1.1b-chat-v1.0.Q6_K.gguf" q8_0_gguf_model_id = "tinyllama-1.1b-chat-v1.0.Q8_0.gguf" q4_0_mistral_model_id = "mistral-7b-instruct-v0.2.Q4_0.gguf" q4_0_qwen2_model_id = "qwen1_5-0_5b-chat-q4_0.gguf" q4_llama3_model_id = "Meta-Llama-3-8B-Q4_K_M.gguf" f16_tinyllama_model_id = "TinyLlama-1.1B-Chat-v1.0.FP16.gguf" example_text = "Hello" def test_q2_k(self): tokenizer = AutoTokenizer.from_pretrained(self.model_id, gguf_file=self.q2_k_gguf_model_id) model = AutoModelForCausalLM.from_pretrained(self.model_id, gguf_file=self.q2_k_gguf_model_id).to(torch_device) text = tokenizer(self.example_text, return_tensors="pt").to(torch_device) out = model.generate(**text, max_new_tokens=10) EXPECTED_TEXT = "Hello, World!\n\n[10:0" self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT) def test_q2_k_serialization(self): tokenizer = AutoTokenizer.from_pretrained(self.model_id, gguf_file=self.q2_k_gguf_model_id) model = AutoModelForCausalLM.from_pretrained(self.model_id, gguf_file=self.q2_k_gguf_model_id).to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) tokenizer.save_pretrained(tmpdirname) model = AutoModelForCausalLM.from_pretrained(tmpdirname).to(torch_device) tokenizer = AutoTokenizer.from_pretrained(tmpdirname) text = tokenizer(self.example_text, return_tensors="pt").to(torch_device) out = model.generate(**text, max_new_tokens=10) EXPECTED_TEXT = "Hello, World!\n\n[10:0" self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT) def test_q3_k(self): tokenizer = AutoTokenizer.from_pretrained(self.model_id, gguf_file=self.q3_k_gguf_model_id) model = AutoModelForCausalLM.from_pretrained(self.model_id, gguf_file=self.q3_k_gguf_model_id).to(torch_device) text = tokenizer(self.example_text, return_tensors="pt").to(torch_device) out = model.generate(**text, max_new_tokens=10) EXPECTED_TEXT = 
"Hello, World!\n\n```\n<|user" self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT) def test_q5_k(self): tokenizer = AutoTokenizer.from_pretrained(self.model_id, gguf_file=self.q5_k_gguf_model_id) model = AutoModelForCausalLM.from_pretrained(self.model_id, gguf_file=self.q5_k_gguf_model_id).to(torch_device) text = tokenizer(self.example_text, return_tensors="pt").to(torch_device) out = model.generate(**text, max_new_tokens=10) EXPECTED_TEXT = "Hello, World!\n\nStep 3: Add" self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT) def test_q4_0(self): tokenizer = AutoTokenizer.from_pretrained(self.model_id, gguf_file=self.q4_0_gguf_model_id) model = AutoModelForCausalLM.from_pretrained(self.model_id, gguf_file=self.q4_0_gguf_model_id).to(torch_device) text = tokenizer(self.example_text, return_tensors="pt").to(torch_device) out = model.generate(**text, max_new_tokens=10) EXPECTED_TEXT = "Hello, World!\n\nStep 3: Add" self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT) def test_q4_k_m(self): tokenizer = AutoTokenizer.from_pretrained(self.model_id, gguf_file=self.q4_k_gguf_model_id) model = AutoModelForCausalLM.from_pretrained(self.model_id, gguf_file=self.q4_k_gguf_model_id).to(torch_device) text = tokenizer(self.example_text, return_tensors="pt").to(torch_device) out = model.generate(**text, max_new_tokens=10) EXPECTED_TEXT = "Hello, World!\n\n5. Python:\n" self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT) def test_q6_k(self): tokenizer = AutoTokenizer.from_pretrained(self.model_id, gguf_file=self.q6_k_gguf_model_id) model = AutoModelForCausalLM.from_pretrained(self.model_id, gguf_file=self.q6_k_gguf_model_id).to(torch_device) text = tokenizer(self.example_text, return_tensors="pt").to(torch_device) out = model.generate(**text, max_new_tokens=10) EXPECTED_TEXT = "Hello, World!\n\nStep 3: Add" self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT) def test_q6_k_fp16(self): tokenizer = AutoTokenizer.from_pretrained(self.model_id, gguf_file=self.q6_k_gguf_model_id) model = AutoModelForCausalLM.from_pretrained( self.model_id, gguf_file=self.q6_k_gguf_model_id, torch_dtype=torch.float16 ).to(torch_device) self.assertTrue(model.lm_head.weight.dtype == torch.float16) text = tokenizer(self.example_text, return_tensors="pt").to(torch_device) out = model.generate(**text, max_new_tokens=10) EXPECTED_TEXT = "Hello, World!\n\nStep 3: Add" self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT) def test_q8_0(self): tokenizer = AutoTokenizer.from_pretrained(self.model_id, gguf_file=self.q8_0_gguf_model_id) model = AutoModelForCausalLM.from_pretrained(self.model_id, gguf_file=self.q8_0_gguf_model_id).to(torch_device) text = tokenizer(self.example_text, return_tensors="pt").to(torch_device) out = model.generate(**text, max_new_tokens=10) EXPECTED_TEXT = "Hello, World!\n\n5. Use a library" self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT) def test_f16(self): tokenizer = AutoTokenizer.from_pretrained(self.tinyllama_model_id, gguf_file=self.f16_tinyllama_model_id) model = AutoModelForCausalLM.from_pretrained( self.tinyllama_model_id, gguf_file=self.f16_tinyllama_model_id ).to(torch_device) text = tokenizer(self.example_text, return_tensors="pt").to(torch_device) out = model.generate(**text, max_new_tokens=10) EXPECTED_TEXT = "Hello, World!\n\n5. 
Node.js" self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT) def test_mistral_q4_0(self): tokenizer = AutoTokenizer.from_pretrained(self.mistral_model_id, gguf_file=self.q4_0_mistral_model_id) model = AutoModelForCausalLM.from_pretrained( self.mistral_model_id, gguf_file=self.q4_0_mistral_model_id, device_map="auto", torch_dtype=torch.float16 ) text = tokenizer(self.example_text, return_tensors="pt").to(torch_device) out = model.generate(**text, max_new_tokens=10) EXPECTED_TEXT = "Hello,\n\nI'm trying to create a" self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT) def test_qwen2_q4_0(self): tokenizer = AutoTokenizer.from_pretrained(self.qwen2_model_id, gguf_file=self.q4_0_qwen2_model_id) model = AutoModelForCausalLM.from_pretrained( self.qwen2_model_id, gguf_file=self.q4_0_qwen2_model_id, device_map="auto", torch_dtype=torch.float16 ) text = tokenizer(self.example_text, return_tensors="pt").to(torch_device) out = model.generate(**text, max_new_tokens=10) EXPECTED_TEXT = "Hello.jsoup\n\nI am a beginner" self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT) def test_llama3_q4_0_tokenizer(self): tokenizer = AutoTokenizer.from_pretrained(self.llama3_model_id, gguf_file=self.q4_llama3_model_id) with tempfile.TemporaryDirectory() as tmpdirname: tokenizer.save_pretrained(tmpdirname) tokenizer = AutoTokenizer.from_pretrained(tmpdirname) special_sentence = "สวัสดี" predicted_text = tokenizer.decode(tokenizer.encode(special_sentence, return_tensors="pt")[0]) self.assertEqual(predicted_text, "<|begin_of_text|>" + special_sentence) def test_llama3_q4_0(self): tokenizer = AutoTokenizer.from_pretrained(self.llama3_model_id, gguf_file=self.q4_llama3_model_id) model = AutoModelForCausalLM.from_pretrained( self.llama3_model_id, gguf_file=self.q4_llama3_model_id, device_map="auto", torch_dtype=torch.float16 ) text = tokenizer(self.example_text, return_tensors="pt").to(torch_device) out = model.generate(**text, max_new_tokens=10) EXPECTED_TEXT = "Hello, I am interested in [The Park]\nThe" self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT) def test_tokenization_xnli(self): import tqdm from datasets import load_dataset gguf_tokenizer = AutoTokenizer.from_pretrained(self.model_id, gguf_file=self.q8_0_gguf_model_id) original_tokenizer = AutoTokenizer.from_pretrained(self.original_model_id) dataset = load_dataset("google/code_x_glue_ct_code_to_text", "go") for item in tqdm.tqdm(dataset["validation"]): string = item["code"] encoded1 = gguf_tokenizer.encode(string) encoded2 = original_tokenizer.encode(string) self.assertEqual(encoded1, encoded2) decoded1 = gguf_tokenizer.decode(encoded1, skip_special_tokens=True) decoded2 = original_tokenizer.decode(encoded2, skip_special_tokens=True) self.assertEqual(decoded1, decoded2) dataset = load_dataset("facebook/xnli", "all_languages") for i, item in enumerate(tqdm.tqdm(dataset["train"].select(range(100)))): for string in item["premise"].values(): encoded1 = gguf_tokenizer.encode(string) encoded2 = original_tokenizer.encode(string) self.assertEqual(encoded1, encoded2) decoded1 = gguf_tokenizer.decode(encoded1, skip_special_tokens=True) decoded2 = original_tokenizer.decode(encoded2, skip_special_tokens=True) self.assertEqual(decoded1, decoded2) # With special tokens gguf_tokenizer = AutoTokenizer.from_pretrained(self.model_id, gguf_file=self.q8_0_gguf_model_id) original_tokenizer = AutoTokenizer.from_pretrained(self.original_model_id) 
gguf_tokenizer.add_special_tokens( {"additional_special_tokens": [AddedToken("<token>", rstrip=False, lstrip=False)]} ) original_tokenizer.add_special_tokens( {"additional_special_tokens": [AddedToken("<token>", rstrip=False, lstrip=False)]} ) text = "Hello <token>. <token> Hello" encoded1 = gguf_tokenizer.encode(text) encoded2 = original_tokenizer.encode(text) self.assertEqual(encoded1, encoded2) decoded1 = gguf_tokenizer.decode(encoded1, skip_special_tokens=True) decoded2 = original_tokenizer.decode(encoded2, skip_special_tokens=True) self.assertEqual(decoded1, decoded2)
transformers/tests/quantization/ggml/test_ggml.py/0
{ "file_path": "transformers/tests/quantization/ggml/test_ggml.py", "repo_id": "transformers", "token_count": 5841 }
441
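The GGUF integration tests above all follow the same load-and-generate pattern; here is a minimal sketch of it, assuming the repo and file ids from the test class. On load, the quantized GGUF tensors are converted back into a regular torch model, which is what lets the tests compare generations across quantization levels.

# Minimal sketch of loading a GGUF-quantized checkpoint, the feature verified above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
gguf_file = "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"

tokenizer = AutoTokenizer.from_pretrained(model_id, gguf_file=gguf_file)
model = AutoModelForCausalLM.from_pretrained(model_id, gguf_file=gguf_file, torch_dtype=torch.float16)

inputs = tokenizer("Hello", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=10)
# Output text varies with the quantization level, as the per-quant EXPECTED_TEXT values show.
print(tokenizer.decode(out[0], skip_special_tokens=True))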
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import copy import inspect import json import os import random import tempfile import unittest from importlib import import_module from math import isnan from typing import List, Tuple from datasets import Dataset from transformers import is_tf_available, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import ( # noqa: F401 CaptureLogger, _tf_gpu_memory_limit, is_pt_tf_cross_test, require_tf, require_tf2onnx, slow, torch_device, ) from transformers.utils import CONFIG_NAME, GENERATION_CONFIG_NAME, logging from transformers.utils.generic import ModelOutput logger = logging.get_logger(__name__) if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFAutoModel, TFAutoModelForSequenceClassification, TFSharedEmbeddings, ) from transformers.generation import ( TFBeamSampleDecoderOnlyOutput, TFBeamSampleEncoderDecoderOutput, TFBeamSearchDecoderOnlyOutput, TFBeamSearchEncoderDecoderOutput, TFGreedySearchDecoderOnlyOutput, TFGreedySearchEncoderDecoderOutput, TFSampleDecoderOnlyOutput, TFSampleEncoderDecoderOutput, ) from transformers.modeling_tf_utils import keras tf.config.experimental.enable_tensor_float_32_execution(False) if _tf_gpu_memory_limit is not None: gpus = tf.config.list_physical_devices("GPU") for gpu in gpus: # Restrict TensorFlow to only allocate x GB of memory on the GPUs try: tf.config.set_logical_device_configuration( gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)] ) logical_gpus = tf.config.list_logical_devices("GPU") print("Logical GPUs", logical_gpus) except RuntimeError as e: # Virtual devices must be set before GPUs have been initialized print(e) if is_torch_available(): import torch def _config_zero_init(config): configs_no_init = copy.deepcopy(config) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key: setattr(configs_no_init, key, 0.0) return configs_no_init @require_tf class TFModelTesterMixin: model_tester = None all_model_classes = () all_generative_model_classes = () test_mismatched_shapes = True test_resize_embeddings = True test_head_masking = True is_encoder_decoder = False has_attentions = True def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict: inputs_dict = copy.deepcopy(inputs_dict) if model_class in 
get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict = { k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1)) if isinstance(v, tf.Tensor) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ *get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING), *get_values(TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING), ]: inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ *get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING), *get_values(TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING), ]: inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in get_values(TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING): inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ *get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), *get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING), *get_values(TF_MODEL_FOR_MASKED_LM_MAPPING), *get_values(TF_MODEL_FOR_PRETRAINING_MAPPING), *get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING), *get_values(TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING), ] and "labels" in dict(inspect.signature(model_class.call).parameters): inputs_dict["labels"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 ) elif model_class in get_values(TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING): num_patches = self.model_tester.image_size // self.model_tester.patch_size inputs_dict["bool_masked_pos"] = tf.zeros( (self.model_tester.batch_size, num_patches**2), dtype=tf.int32 ) elif model_class in get_values(TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING): batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape inputs_dict["labels"] = tf.zeros((self.model_tester.batch_size, height, width), dtype=tf.int32) elif model_class.__name__.endswith("ForCTC"): # When we have enough CTC models for an AutoClass, we should use their mapping instead of name checks inputs_dict["labels"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 ) return inputs_dict def test_initialization(self): pass def test_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=False) # the config file (and the generation config file, if it can generate) should be saved self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME))) self.assertEqual( model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME)) ) model = model_class.from_pretrained(tmpdirname) after_outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assert_outputs_same(after_outputs, outputs) def test_save_load_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) model_config = model.get_config() # make sure that returned config is 
            # jsonifiable, which is required by keras
            json.dumps(model_config)

            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(self._prepare_for_class(inputs_dict, model_class))  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(self._prepare_for_class(inputs_dict, model_class))

            self.assert_outputs_same(after_outputs, outputs)

    @slow
    def test_saved_model_creation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = False
        config.output_attentions = False

        if hasattr(config, "use_cache"):
            config.use_cache = False

        model_class = self.all_model_classes[0]

        class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
        model = model_class(config)

        model(class_inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname, saved_model=True)
            saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
            self.assertTrue(os.path.exists(saved_model_dir))

    def test_prepare_serving_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)

            outputs = model(inputs)
            serving_outputs = model.serving_output(outputs)

            for k, v in serving_outputs.items():
                # Check that we have one of three possible outputs: None, a tuple of tensors, or a tensor
                if isinstance(v, tuple):
                    self.assertTrue(all(isinstance(elem, tf.Tensor) for elem in v))
                elif v is not None:
                    self.assertIsInstance(v, tf.Tensor)
                else:
                    self.assertIsNone(v)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            if model.config.is_encoder_decoder:
                expected_arg_names = [
                    "input_ids",
                    "attention_mask",
                    "decoder_input_ids",
                    "decoder_attention_mask",
                ]
                expected_arg_names.extend(["decoder_position_ids"] if "decoder_position_ids" in arg_names else [])
                expected_arg_names.extend(
                    ["head_mask", "decoder_head_mask"]
                    if "head_mask" in arg_names and "decoder_head_mask" in arg_names
                    else []
                )
                expected_arg_names.extend(
                    ["cross_attn_head_mask", "encoder_outputs"]
                    if "cross_attn_head_mask" in arg_names
                    else ["encoder_outputs"]
                )
                self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
            else:
                expected_arg_names = ["input_ids"]
                self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_onnx_compliancy(self):
        if not self.test_onnx:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        INTERNAL_OPS = [
            "Assert",
            "AssignVariableOp",
            "EmptyTensorList",
            "ReadVariableOp",
            "ResourceGather",
            "TruncatedNormal",
            "VarHandleOp",
            "VarIsInitializedOp",
        ]
        onnx_ops = []

        with open(os.path.join(".", "utils", "tf_ops", "onnx.json")) as f:
            onnx_opsets = json.load(f)["opsets"]

        for i in range(1, self.onnx_min_opset + 1):
            onnx_ops.extend(onnx_opsets[str(i)])

        for model_class in self.all_model_classes:
            model_op_names = set()

            with tf.Graph().as_default() as g:
                model = model_class(config)
                model.build_in_name_scope()

                for op in g.get_operations():
                    model_op_names.add(op.node_def.op)

            model_op_names = sorted(model_op_names)
            incompatible_ops = []
            for op in model_op_names:
                if op not in onnx_ops and op not in INTERNAL_OPS:
                    incompatible_ops.append(op)

            self.assertEqual(len(incompatible_ops), 0, incompatible_ops)

    # `tf2onnx` issue page: https://github.com/onnx/tensorflow-onnx/issues/2172
    # TODO: undo skip once a fix is done in `tf2onnx`
    @unittest.skip("`tf2onnx` broke with TF 2.13")
    @require_tf2onnx
    @slow
    def test_onnx_runtime_optimize(self):
        if not self.test_onnx:
            return

        import onnxruntime
        import tf2onnx

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:2]:
            model = model_class(config)
            model.build_in_name_scope()

            onnx_model_proto, _ = tf2onnx.convert.from_keras(model, opset=self.onnx_min_opset)

            onnxruntime.InferenceSession(onnx_model_proto.SerializeToString())

    def test_keras_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }

        for main_layer_class in tf_main_layer_classes:
            # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
            if "T5" in main_layer_class.__name__:
                # Take the same values as in TFT5ModelTester for this shared layer
                shared = TFSharedEmbeddings(99, 32, name="shared")
                config.use_cache = inputs_dict.pop("use_cache", None)
                main_layer = main_layer_class(config, embed_tokens=shared)
            else:
                main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: keras.Input(tensor.shape[1:], dtype=tensor.dtype)
                for name, tensor in inputs_dict.items()
                if tf.is_tensor(tensor)
            }

            model = keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                if "T5" in main_layer_class.__name__:
                    model = keras.models.load_model(
                        filepath,
                        custom_objects={
                            main_layer_class.__name__: main_layer_class,
                            "TFSharedEmbeddings": TFSharedEmbeddings,
                        },
                    )
                else:
                    model = keras.models.load_model(
                        filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                    )
                assert isinstance(model, keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)

    def assert_outputs_same(self, after_outputs, outputs):
        # Make sure we don't have nans
        if isinstance(after_outputs, tf.Tensor):
            out_1 = after_outputs.numpy()
        elif isinstance(after_outputs, dict):
            out_1 = after_outputs[list(after_outputs.keys())[0]].numpy()
        else:
            out_1 = after_outputs[0].numpy()
        out_2 = outputs[0].numpy()
        self.assertEqual(out_1.shape, out_2.shape)
        out_1 = out_1[~np.isnan(out_1)]
        out_2 = out_2[~np.isnan(out_2)]
        max_diff = np.amax(np.abs(out_1 - out_2))
        self.assertLessEqual(max_diff, 1e-5)

    # Don't copy this method to a model-specific test file!
    # TODO: remove this method once the issues are all fixed!
    def _make_attention_mask_non_null(self, inputs_dict):
        """Make sure no sequence has an all-zero attention mask."""
        for k in ["attention_mask", "encoder_attention_mask", "decoder_attention_mask"]:
            if k in inputs_dict:
                attention_mask = inputs_dict[k]

                # Make sure there are no all-zero attention masks, to avoid failures at this stage.
                # Put `1` at the beginning of sequences to make it still work when combining causal attention masks.
                # TODO: remove this line once a fix regarding large negative values for attention mask is done.
                attention_mask = tf.concat(
                    [tf.ones_like(attention_mask[:, :1], dtype=attention_mask.dtype), attention_mask[:, 1:]], axis=-1
                )

                # Here we make the first sequence have an all-zero attention mask.
                # Currently, this will fail for `TFWav2Vec2Model`. This is caused by the different large negative
                # values, like `1e-4`, `1e-9`, `1e-30` and `-inf`, used for the attention mask across models/frameworks.
                # TODO: enable this block once the large negative values thing is cleaned up.
                # (see https://github.com/huggingface/transformers/issues/14859)
                # attention_mask = tf.concat(
                #     [
                #         tf.zeros_like(attention_mask[:1], dtype=tf.int32),
                #         tf.cast(attention_mask[1:], dtype=tf.int32)
                #     ],
                #     axis=0
                # )

                inputs_dict[k] = attention_mask

    # Don't copy this method to a model-specific test file!
    # TODO: remove this method once the issues are all fixed!
    def _postprocessing_to_ignore_test_cases(self, tf_outputs, pt_outputs, model_class):
        """For temporarily ignoring some failed test cases (issues to be fixed)."""
        tf_keys = {k for k, v in tf_outputs.items() if v is not None}
        pt_keys = {k for k, v in pt_outputs.items() if v is not None}

        key_differences = tf_keys.symmetric_difference(pt_keys)

        if model_class.__name__ in [
            "TFFlaubertWithLMHeadModel",
            "TFFunnelForPreTraining",
            "TFElectraForPreTraining",
            "TFXLMWithLMHeadModel",
        ]:
            for k in key_differences:
                if k in ["loss", "losses"]:
                    tf_keys.discard(k)
                    pt_keys.discard(k)
        elif model_class.__name__.startswith("TFGPT2"):
            # `TFGPT2` has `past_key_values` as a tensor while `GPT2` has it as a tuple.
            tf_keys.discard("past_key_values")
            pt_keys.discard("past_key_values")

        # create new outputs from the remaining fields
        new_tf_outputs = type(tf_outputs)(**{k: tf_outputs[k] for k in tf_keys})
        new_pt_outputs = type(pt_outputs)(**{k: pt_outputs[k] for k in pt_keys})

        return new_tf_outputs, new_pt_outputs

    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        """Check that the outputs from the PyTorch and TensorFlow models are close enough. Checks are done in a
        recursive way.

        Args:
            model_class: The class of the model that is currently being tested. For example, `TFBertModel`,
                `TFBertForMaskedLM`, `TFBertForSequenceClassification`, etc. Mainly used for providing more
                informative error messages.
            name (`str`): The name of the output. For example, `output.hidden_states`, `output.attentions`, etc.
            attributes (`Tuple[str]`): The names of the output's elements if the output is a tuple/list, with each
                element being a named field in the output.
        """
        self.assertEqual(type(name), str)
        if attributes is not None:
            self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`")

        # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`).
        if isinstance(tf_outputs, ModelOutput):
            self.assertTrue(
                isinstance(pt_outputs, ModelOutput),
                f"{name}: `pt_outputs` should be an instance of `ModelOutput` when `tf_outputs` is",
            )

            # Don't copy this block to a model-specific test file!
            # TODO: remove this method and this line after issues are fixed
            tf_outputs, pt_outputs = self._postprocessing_to_ignore_test_cases(tf_outputs, pt_outputs, model_class)

            tf_keys = [k for k, v in tf_outputs.items() if v is not None]
            pt_keys = [k for k, v in pt_outputs.items() if v is not None]

            self.assertEqual(tf_keys, pt_keys, f"{name}: Output keys differ between TF and PyTorch")

            # convert to the case of `tuple`
            # appending each key to the current (string) `name`
            attributes = tuple([f"{name}.{k}" for k in tf_keys])
            self.check_pt_tf_outputs(
                tf_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes
            )

        # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.)
        elif type(tf_outputs) in [tuple, list]:
            self.assertEqual(type(tf_outputs), type(pt_outputs), f"{name}: Output types differ between TF and PyTorch")
            self.assertEqual(len(tf_outputs), len(pt_outputs), f"{name}: Output lengths differ between TF and PyTorch")

            if attributes is not None:
                # case 1: each output has an assigned name (e.g. a tuple form of a `ModelOutput`)
                self.assertEqual(
                    len(attributes),
                    len(tf_outputs),
                    f"{name}: The tuple `attributes` should have the same length as `tf_outputs`",
                )
            else:
                # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `attributes`
                attributes = tuple([f"{name}_{idx}" for idx in range(len(tf_outputs))])

            for tf_output, pt_output, attr in zip(tf_outputs, pt_outputs, attributes):
                self.check_pt_tf_outputs(tf_output, pt_output, model_class, tol=tol, name=attr)

        elif isinstance(tf_outputs, tf.Tensor):
            self.assertTrue(
                isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should be a tensor when `tf_outputs` is"
            )

            tf_outputs = tf_outputs.numpy()
            pt_outputs = pt_outputs.detach().to("cpu").numpy()

            self.assertEqual(
                tf_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between TF and PyTorch"
            )

            # deal with NumPy's scalars to make replacing nan values by 0 work.
            if np.isscalar(tf_outputs):
                tf_outputs = np.array([tf_outputs])
                pt_outputs = np.array([pt_outputs])

            tf_nans = np.isnan(tf_outputs)
            pt_nans = np.isnan(pt_outputs)

            pt_outputs[tf_nans] = 0
            tf_outputs[tf_nans] = 0
            pt_outputs[pt_nans] = 0
            tf_outputs[pt_nans] = 0

            max_diff = np.amax(np.abs(tf_outputs - pt_outputs))
            self.assertLessEqual(max_diff, tol, f"{name}: Difference between torch and tf is {max_diff} (>= {tol}).")
        else:
            raise ValueError(
                "`tf_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `tf.Tensor`. Got"
                f" {type(tf_outputs)} instead."
) def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict): pt_inputs_dict = {} for name, key in tf_inputs_dict.items(): if isinstance(key, bool): pt_inputs_dict[name] = key elif name == "input_values": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) elif name == "pixel_values": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) elif name == "input_features": pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) # other general float inputs elif tf_inputs_dict[name].dtype.is_floating: pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32) else: pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long) return pt_inputs_dict def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict): pt_inputs_dict = self.prepare_pt_inputs_from_tf_inputs(tf_inputs_dict) # send pytorch inputs to the correct device pt_inputs_dict = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs_dict.items() } # send pytorch model to the correct device pt_model.to(torch_device) # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences pt_model.eval() with torch.no_grad(): pt_outputs = pt_model(**pt_inputs_dict) tf_outputs = tf_model(tf_inputs_dict) # tf models returned loss is usually a tensor rather than a scalar. # (see `hf_compute_loss`: it uses `keras.losses.Reduction.NONE`) # Change it here to a scalar to match PyTorch models' loss tf_loss = getattr(tf_outputs, "loss", None) if tf_loss is not None: tf_outputs.loss = tf.math.reduce_mean(tf_loss) self.check_pt_tf_outputs(tf_outputs, pt_outputs, type(tf_model)) @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self, allow_missing_keys=False): import transformers for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions # Make sure no sequence has all zeros as attention mask, otherwise some tests fail due to the inconsistency # of the usage `1e-4`, `1e-9`, `1e-30`, `-inf`. # TODO: Use a uniform value for all models, make sure all tests pass without this processing, and remove it. self._make_attention_mask_non_null(inputs_dict) pt_model_class_name = model_class.__name__[2:] # Skip the "TF" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) tf_model = model_class(config) pt_model = pt_model_class(config) tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class) tf_inputs_dict_with_labels = self._prepare_for_class( inputs_dict, model_class, # Not all models accept "labels" in the forward pass (yet :) ) return_labels=True if "labels" in inspect.signature(model_class.call).parameters.keys() else False, ) # For some models (e.g. base models), there is no label returned. # Set the input dict to `None` to avoid check outputs twice for the same input dicts. 
if not set(tf_inputs_dict_with_labels.keys()).symmetric_difference(tf_inputs_dict.keys()): tf_inputs_dict_with_labels = None # Check we can load pt model in tf and vice-versa with model => model functions tf_model = transformers.load_pytorch_model_in_tf2_model( tf_model, pt_model, tf_inputs=tf_inputs_dict, allow_missing_keys=allow_missing_keys ) pt_model = transformers.load_tf2_model_in_pytorch_model( pt_model, tf_model, allow_missing_keys=allow_missing_keys ) # Original test: check without `labels` self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) # check with `labels` if tf_inputs_dict_with_labels: self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict_with_labels) # Check we can load pt model in tf and vice-versa with checkpoint => model functions with tempfile.TemporaryDirectory() as tmpdirname: pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin") torch.save(pt_model.state_dict(), pt_checkpoint_path) tf_model = transformers.load_pytorch_checkpoint_in_tf2_model( tf_model, pt_checkpoint_path, allow_missing_keys=allow_missing_keys ) tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5") tf_model.save_weights(tf_checkpoint_path) pt_model = transformers.load_tf2_checkpoint_in_pytorch_model( pt_model, tf_checkpoint_path, allow_missing_keys=allow_missing_keys ) # Original test: check without `labels` self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) # check with `labels` if tf_inputs_dict_with_labels: self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict_with_labels) @slow def test_compile_tf_model(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:2]: # Prepare our model model = model_class(config) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes functional_inputs = { key: keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key) for key, val in model.input_signature.items() if key in model.dummy_inputs } outputs_dict = model(functional_inputs) hidden_states = outputs_dict[0] # Compile extended model functional_model = keras.Model(inputs=functional_inputs, outputs=hidden_states) model_out = functional_model.predict(model.dummy_inputs) # Check we can pass inputs with the Keras API self.assertTrue(model_out is not None) with tempfile.TemporaryDirectory() as tmpdirname: functional_model.save(tmpdirname) # Ensure we can save/export the whole functional model def test_keyword_and_dict_args(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class) outputs_dict = model(inputs) inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) outputs_keywords = model(**inputs_keywords) output_dict = outputs_dict[0].numpy() output_keywords = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model does not output attentions") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length) decoder_key_length = getattr(self.model_tester, 
"key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) def check_decoder_attentions_output(outputs): out_len = len(outputs) self.assertEqual(min(out_len % 2, out_len % 5), 0) # differentiation due to newly added cross_attentions decoder_attentions = outputs.decoder_attentions self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) def check_encoder_attentions_output(outputs): attentions = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True config.output_hidden_states = False model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) out_len = len(outputs) self.assertEqual(config.output_hidden_states, False) check_encoder_attentions_output(outputs) if self.is_encoder_decoder: model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(config.output_hidden_states, False) check_decoder_attentions_output(outputs) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(config.output_hidden_states, False) check_encoder_attentions_output(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True config.output_hidden_states = True model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs)) self.assertEqual(model.config.output_hidden_states, True) check_encoder_attentions_output(outputs) def test_headmasking(self): if not self.test_head_masking: return random.Random().seed(42) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() random.Random().seed() inputs_dict["output_attentions"] = True config.output_hidden_states = True configs_no_init = _config_zero_init(config) # To be sure we have no Nan for model_class in self.all_model_classes: model = model_class(config=configs_no_init) # Prepare head_mask def prepare_layer_head_mask(i, attention_heads, num_hidden_layers): if i == 0: return tf.concat( (tf.zeros(1, dtype=tf.float32), tf.ones(attention_heads - 1, dtype=tf.float32)), 0 ) elif i == num_hidden_layers - 1: return tf.concat( (tf.zeros(attention_heads - 1, dtype=tf.float32), tf.ones(1, dtype=tf.float32)), 0 ) else: return tf.ones(attention_heads, dtype=tf.float32) head_mask = tf.stack( [ prepare_layer_head_mask(i, config.num_attention_heads, config.num_hidden_layers) for i in range(config.num_hidden_layers) ], 0, ) inputs = self._prepare_for_class(inputs_dict, model_class).copy() inputs["head_mask"] = head_mask if model.config.is_encoder_decoder: signature = inspect.signature(model.call) arg_names = [*signature.parameters.keys()] if "decoder_head_mask" in arg_names: # necessary diferentiation because of T5 model inputs["decoder_head_mask"] = head_mask if "cross_attn_head_mask" in 
arg_names: inputs["cross_attn_head_mask"] = head_mask outputs = model(**inputs, return_dict=True) def check_attentions_validity(attentions): # Remove Nan for t in attentions: self.assertLess( (tf.math.reduce_sum(tf.cast(tf.math.is_nan(t), tf.float32))).numpy(), (tf.size(t) / 4).numpy() ) # Check we don't have more than 25% nans (arbitrary) attentions = [ tf.where(tf.math.is_nan(t), 0.0, t) for t in attentions ] # remove them (the test is less complete) self.assertAlmostEqual(tf.math.reduce_sum(attentions[0][..., 0, :, :]).numpy(), 0.0) self.assertNotEqual(tf.math.reduce_sum(attentions[0][..., -1, :, :]).numpy(), 0.0) if len(attentions) > 2: # encoder-decodere models have only 2 layers in each modules self.assertNotEqual(tf.math.reduce_sum(attentions[1][..., 0, :, :]).numpy(), 0.0) self.assertAlmostEqual(tf.math.reduce_sum(attentions[-1][..., -2, :, :]).numpy(), 0.0) self.assertNotEqual(tf.math.reduce_sum(attentions[-1][..., -1, :, :]).numpy(), 0.0) if model.config.is_encoder_decoder: check_attentions_validity(outputs.encoder_attentions) check_attentions_validity(outputs.decoder_attentions) if "cross_attn_head_mask" in arg_names: check_attentions_validity(outputs.cross_attentions) else: check_attentions_validity(outputs.attentions) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_hidden_states_output(config, inputs_dict, model_class): model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) if model.config.is_encoder_decoder: encoder_hidden_states = outputs.encoder_hidden_states decoder_hidden_states = outputs.decoder_hidden_states self.assertEqual(config.output_attentions, False) self.assertEqual(len(encoder_hidden_states), expected_num_layers) self.assertListEqual( list(encoder_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], ) self.assertEqual(len(decoder_hidden_states), expected_num_layers) self.assertListEqual( list(decoder_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], ) else: hidden_states = outputs.hidden_states self.assertEqual(config.output_attentions, False) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], ) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(config, inputs_dict, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(config, inputs_dict, model_class) def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() text_in_text_out_models = ( get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING) + get_values(TF_MODEL_FOR_MASKED_LM_MAPPING) + get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING) ) speech_in_text_out_models = get_values(TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING) for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), keras.layers.Layer) legacy_text_in_text_out = model.get_lm_head() is not None if model_class in text_in_text_out_models or legacy_text_in_text_out: out_embeddings = model.get_output_embeddings() self.assertIsInstance(out_embeddings, keras.layers.Layer) bias = model.get_bias() 
if bias is not None: self.assertIsInstance(bias, dict) for _, v in bias.items(): self.assertIsInstance(v, tf.Variable) elif model_class in speech_in_text_out_models: out_embeddings = model.get_output_embeddings() self.assertIsInstance(out_embeddings, keras.layers.Layer) bias = model.get_bias() self.assertIsNone(bias) else: out_embeddings = model.get_output_embeddings() assert out_embeddings is None bias = model.get_bias() self.assertIsNone(bias) def test_determinism(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) first, second = ( model(self._prepare_for_class(inputs_dict, model_class), training=False)[0], model(self._prepare_for_class(inputs_dict, model_class), training=False)[0], ) out_1 = first.numpy() out_2 = second.numpy() out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(tuple_object, dict_object)), msg=( "Tuple and dict output are not equal. Difference:" f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}" ), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) if self.has_attentions: tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) # Not all models accept "labels" in the forward pass (yet :) ) if "labels" in inspect.signature(model.call).parameters.keys(): tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) if self.has_attentions: tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, 
model_class, return_labels=True)
                check_equivalence(
                    model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
                )

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)

            inputs = copy.deepcopy(inputs_dict)

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = model.get_input_embeddings()(input_ids)
            else:
                inputs["inputs_embeds"] = model.get_input_embeddings()(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = model.get_input_embeddings()(decoder_input_ids)

            inputs = self._prepare_for_class(inputs, model_class)

            model(inputs)

    def test_numpy_arrays_inputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)  # convert the value (not the key) to a numpy array
            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)

            inputs = self._prepare_for_class(inputs_dict, model_class)

            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np)
            output_for_kw_input = model(**inputs_np)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def test_valid_input_signature_and_dummies(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            call_args = inspect.signature(model.call).parameters
            for key in model.input_signature:
                self.assertIn(key, call_args)
            for key in model.dummy_inputs:
                self.assertIn(key, call_args)

    def test_resize_token_embeddings(self):
        # TODO (joao): after the embeddings refactor is complete, rework this test so as to rely exclusively on
        # keras.layers.Embedding
        if not self.test_resize_embeddings:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if isinstance(embedding_layer, keras.layers.Embedding):
                # builds the embeddings layer
                model.build_in_name_scope()
                return embedding_layer.embeddings
            else:
                return model._get_word_embedding_weight(embedding_layer)

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10, None]:
                # build the embeddings
                model = model_class(config=copy.deepcopy(config))  # `resize_token_embeddings` mutates `config`
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_bias = model.get_bias()
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_bias = model.get_bias()
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_bias is not None and new_bias is not None:
                    for old_weight, new_weight in zip(old_bias.values(), new_bias.values()):
                        self.assertEqual(new_weight.shape[-1], assert_size)

                        models_equal = True
                        for p1, p2 in zip(tf.squeeze(old_weight), tf.squeeze(new_weight)):
                            if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                                models_equal = False
                        self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)
                    self.assertEqual(new_output_embeddings.shape[1], old_output_embeddings.shape[1])

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)

    # TODO (Joao): this test is not slow, but it's tagged as such to keep track of failures on the scheduled CI runs,
    # while passing push CI. Fix the underlying issues and remove the tag.
    @slow
    def test_save_load_after_resize_token_embeddings(self):
        if not self.test_resize_embeddings:
            return
        config, original_inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # create a model with resized (expanded) embeddings
            new_tokens_size = 10
            old_total_size = config.vocab_size
            new_total_size = old_total_size + new_tokens_size
            model = model_class(config=copy.deepcopy(config))  # `resize_token_embeddings` mutates `config`
            model.build_in_name_scope()
            model.resize_token_embeddings(new_total_size)

            # fetch the output for an input exclusively made of new members of the vocabulary
            inputs_dict = copy.deepcopy(original_inputs_dict)
            ids_feat_name = None
            if "input_ids" in inputs_dict:
                ids_feat_name = "input_ids"
            elif "decoder_input_ids" in inputs_dict:
                ids_feat_name = "decoder_input_ids"
            else:
                assert False, "No input ids feature found in the inputs dict"

            new_vocab_input_ids = ids_tensor(inputs_dict[ids_feat_name].shape, new_tokens_size)
            new_vocab_input_ids += old_total_size
            inputs_dict[ids_feat_name] = new_vocab_input_ids
            if "input_ids" in inputs_dict:
                inputs_dict["input_ids"] = new_vocab_input_ids
            if "decoder_input_ids" in inputs_dict:
                inputs_dict["decoder_input_ids"] = new_vocab_input_ids
            prepared_inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(**prepared_inputs)

            # save and load the model
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                restored_model_outputs = model(**prepared_inputs)

                # check that the output for the restored model is the same
                self.assert_outputs_same(restored_model_outputs, outputs)

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="This test always passes on CPU.",
    )
    def test_embeddings_out_of_bounds_raise_exception(self):
        # TF embeddings layers don't raise an exception when an index is out of bounds on GPU, so we manually raise it.
        # This test should only fail on GPU for models where we haven't added the safety check.
if not self.test_resize_embeddings: return config, original_inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config=config) inputs_dict = copy.deepcopy(original_inputs_dict) if "input_ids" in inputs_dict: inputs_dict["input_ids"] = inputs_dict["input_ids"] * int(1e9) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"] = inputs_dict["decoder_input_ids"] * int(1e9) prepared_inputs = self._prepare_for_class(inputs_dict, model_class) with self.assertRaises(tf.errors.InvalidArgumentError): model(**prepared_inputs) def test_lm_head_model_random_no_beam_search_generate(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict.get("input_ids", None) # iterate over all generative models for model_class in self.all_generative_model_classes: model = model_class(config) if config.bos_token_id is None: # if bos token id is not defined model needs input_ids with self.assertRaises(ValueError): model.generate(do_sample=True, max_length=5) # num_return_sequences = 1 self._check_generated_ids(model.generate(input_ids, do_sample=True)) elif model_class.__name__ not in ["TFSpeech2TextForConditionalGeneration"]: # Models with non-text inputs won't work here; num_return_sequences = 1 self._check_generated_ids(model.generate(do_sample=True, max_length=5)) with self.assertRaises(ValueError): # generating multiple sequences when no beam search generation # is not allowed as it would always generate the same sequences model.generate(input_ids, do_sample=False, num_return_sequences=2) # num_return_sequences > 1, sample self._check_generated_ids(model.generate(input_ids, do_sample=True, num_return_sequences=2)) # check bad words tokens language generation # create list of 1-seq bad token and list of 2-seq of bad tokens bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)] output_tokens = model.generate( input_ids, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2 ) # only count generated tokens generated_ids = output_tokens[:, input_ids.shape[-1] :] self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids)) def test_lm_head_model_no_beam_search_generate_dict_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict.get("input_ids", None) if input_ids is None: input_ids = inputs_dict.get("input_features", None) # iterate over all generative models for model_class in self.all_generative_model_classes: model = model_class(config) output_greedy = model.generate( input_ids, do_sample=False, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) output_sample = model.generate( input_ids, do_sample=True, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_greedy, TFGreedySearchEncoderDecoderOutput) self.assertIsInstance(output_sample, TFSampleEncoderDecoderOutput) else: self.assertIsInstance(output_greedy, TFGreedySearchDecoderOnlyOutput) self.assertIsInstance(output_sample, TFSampleDecoderOnlyOutput) def test_lm_head_model_random_beam_search_generate(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict.get("input_ids", None) for model_class in self.all_generative_model_classes: model = 
model_class(config) if config.bos_token_id is None: # if bos token id is not defined, the model needs input_ids; num_return_sequences = 1 self._check_generated_ids(model.generate(input_ids, do_sample=True, num_beams=2)) else: # num_return_sequences = 1 self._check_generated_ids(model.generate(do_sample=True, max_length=5, num_beams=2)) with self.assertRaises(ValueError): # generating more return sequences than there are beams is not possible model.generate(input_ids, do_sample=False, num_return_sequences=3, num_beams=2) # num_return_sequences > 1, sample self._check_generated_ids( model.generate( input_ids, do_sample=True, num_beams=2, num_return_sequences=2, ) ) # num_return_sequences > 1, greedy self._check_generated_ids(model.generate(input_ids, do_sample=False, num_beams=2, num_return_sequences=2)) # check bad words tokens language generation # create list of 1-seq bad token and list of 2-seq of bad tokens bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)] output_tokens = model.generate( input_ids, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2 ) # only count generated tokens generated_ids = output_tokens[:, input_ids.shape[-1] :] self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids)) def test_lm_head_model_beam_search_generate_dict_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict.get("input_ids", None) if input_ids is None: input_ids = inputs_dict.get("input_features", None) # iterate over all generative models for model_class in self.all_generative_model_classes: model = model_class(config) output_beam_search = model.generate( input_ids, num_beams=2, do_sample=False, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) output_beam_sample = model.generate( input_ids, num_beams=2, do_sample=True, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_beam_search, TFBeamSearchEncoderDecoderOutput) self.assertIsInstance(output_beam_sample, TFBeamSampleEncoderDecoderOutput) else: self.assertIsInstance(output_beam_search, TFBeamSearchDecoderOnlyOutput) self.assertIsInstance(output_beam_sample, TFBeamSampleDecoderOnlyOutput) def test_loss_computation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # The number of elements in the loss should be the same as the number of elements in the label prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) added_label_names = sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True) if not added_label_names: continue # This test is only for models with easily-separable labels added_label = prepared_for_class[added_label_names[0]] expected_loss_size = added_label.shape.as_list()[:1] # Test that the model correctly computes the loss with kwargs prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) possible_input_names = {"input_ids", "pixel_values", "input_features", "input_values"} input_name = possible_input_names.intersection(set(prepared_for_class)).pop() model_input = prepared_for_class.pop(input_name) outputs = model(model_input, **prepared_for_class) if not isinstance(outputs, ModelOutput) or not
hasattr(outputs, "loss"): continue loss = outputs.loss self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) # Test that the model correctly computes the loss when we mask some positions prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) possible_input_names = {"input_ids", "pixel_values", "input_features", "input_values"} input_name = possible_input_names.intersection(set(prepared_for_class)).pop() model_input = prepared_for_class.pop(input_name) if "labels" in prepared_for_class: labels = prepared_for_class["labels"].numpy() if len(labels.shape) > 1 and labels.shape[1] != 1: labels[0] = -100 prepared_for_class["labels"] = tf.convert_to_tensor(labels) loss = model(model_input, **prepared_for_class)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) self.assertTrue(not np.any(np.isnan(loss.numpy()))) # Test that the model correctly computes the loss with a dict prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) loss = model(prepared_for_class)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) # Test that the model correctly computes the loss with a tuple prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) # Get keys that were added with the _prepare_for_class function label_keys = prepared_for_class.keys() - inputs_dict.keys() signature = inspect.signature(model.call).parameters signature_names = list(signature.keys()) # Create a dictionary holding the location of the tensors in the tuple tuple_index_mapping = {0: input_name} for label_key in label_keys: label_key_index = signature_names.index(label_key) tuple_index_mapping[label_key_index] = label_key sorted_tuple_index_mapping = sorted(tuple_index_mapping.items()) # Initialize a list with their default values, update the values and convert to a tuple list_input = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default) for index, value in sorted_tuple_index_mapping: list_input[index] = prepared_for_class[value] tuple_input = tuple(list_input) # Send to model loss = model(tuple_input[:-1])[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) def check_keras_fit_results(self, val_loss1, val_loss2, atol=1e-2, rtol=1e-3): self.assertTrue(np.allclose(val_loss1, val_loss2, atol=atol, rtol=rtol)) @slow def test_keras_fit(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # Test that the model correctly computes the loss with kwargs prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) # We also remove "return_loss" as this is covered by the train_step when using fit() prepared_for_class = { key: val for key, val in prepared_for_class.items() if key not in ("head_mask", "decoder_head_mask", "cross_attn_head_mask", "return_loss") } if "labels" in prepared_for_class and "decoder_input_ids" in prepared_for_class: del prepared_for_class["decoder_input_ids"] accuracy_classes = [ "ForPreTraining", "ForCausalLM", "ForMaskedLM", "ForQuestionAnswering", "ForMultipleChoice", "ForSequenceClassification", "ForTokenClassification", "ForNextSentencePrediction", "LMHeadModel", ] for accuracy_class in accuracy_classes: if model.__class__.__name__.endswith(accuracy_class): metrics =
[keras.metrics.SparseCategoricalAccuracy()] break else: metrics = [] if hasattr(self.model_tester, "batch_size"): sample_weight = tf.convert_to_tensor([0.5] * self.model_tester.batch_size, dtype=tf.float32) else: sample_weight = None # Build the model so we can get some constant weights and check outputs outputs = model(prepared_for_class) if getattr(outputs, "loss", None) is None: continue model_weights = model.get_weights() # Run eagerly to save some expensive compilation times model.compile(optimizer=keras.optimizers.SGD(0.0), run_eagerly=True, metrics=metrics) # Make sure the model fits without crashing regardless of where we pass the labels history1 = model.fit( prepared_for_class, validation_data=prepared_for_class, sample_weight=sample_weight, steps_per_epoch=1, validation_steps=1, shuffle=False, ) val_loss1 = history1.history["val_loss"][0] self.assertTrue(not isnan(val_loss1)) accuracy1 = {key: val[0] for key, val in history1.history.items() if key.endswith("accuracy")} possible_label_cols = { "labels", "label", "label_ids", "start_positions", "start_position", "end_positions", "end_position", "next_sentence_label", } label_names = possible_label_cols.intersection(set(prepared_for_class)) if len(label_names) == 0: # The next tests only make sense for models with separate inputs and labels, and do not make # sense for models that don't clearly distinguish between the two (e.g. CLIP) return labels = {key: val for key, val in prepared_for_class.items() if key in label_names} inputs_minus_labels = {key: val for key, val in prepared_for_class.items() if key not in label_names} self.assertGreater(len(inputs_minus_labels), 0) # We reinitialize the model here even though our learning rate was zero # because BatchNorm updates weights by means other than gradient descent. 
model.set_weights(model_weights) history2 = model.fit( inputs_minus_labels, labels, validation_data=(inputs_minus_labels, labels), sample_weight=sample_weight, steps_per_epoch=1, validation_steps=1, shuffle=False, ) val_loss2 = history2.history["val_loss"][0] self.assertTrue(not isnan(val_loss2)) accuracy2 = {key: val[0] for key, val in history2.history.items() if key.endswith("accuracy")} self.check_keras_fit_results(val_loss1, val_loss2) self.assertEqual(history1.history.keys(), history2.history.keys()) for key in history1.history.keys(): if not key.startswith("val_"): self.assertTrue("val_" + key in history1.history.keys(), "Outputs differ in train/test step!") if metrics: self.assertTrue(len(accuracy1) == len(accuracy2) > 0, "Missing metrics!") def test_int_support(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: prepared_for_class = self._prepare_for_class( inputs_dict.copy(), model_class, return_labels=True if "labels" in inspect.signature(model_class.call).parameters.keys() else False, ) if not any( tensor.dtype.is_integer for tensor in prepared_for_class.values() if isinstance(tensor, tf.Tensor) ): return # No integer inputs means no need for this test prepared_for_class = { key: tf.cast(tensor, tf.int64) if isinstance(tensor, tf.Tensor) and tensor.dtype.is_integer else tensor for key, tensor in prepared_for_class.items() } model = model_class(config) model(**prepared_for_class) # No assertion, we're just checking this doesn't throw an error int32_prepared_for_class = { key: tf.cast(tensor, tf.int32) if isinstance(tensor, tf.Tensor) and tensor.dtype.is_integer else tensor for key, tensor in prepared_for_class.items() } model(**int32_prepared_for_class) # No assertion, we're just checking this doesn't throw an error # After testing that the model accepts all int inputs, confirm that its dummies are int32 for key, tensor in model.dummy_inputs.items(): self.assertTrue( isinstance(tensor, tf.Tensor) or keras.backend.is_keras_tensor(tensor), "Dummy inputs should be tf.Tensor!", ) if tensor.dtype.is_integer: self.assertTrue(tensor.dtype == tf.int32, "Integer dummy inputs should be tf.int32!") # Also confirm that the input_signature uses int32 for key, tensor_spec in model.input_signature.items(): if tensor_spec.dtype.is_integer: self.assertTrue(tensor_spec.dtype == tf.int32, "Input signatures should use tf.int32 for ints!") def test_generate_with_headmasking(self): attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"] config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_generative_model_classes: model = model_class(config) # We want to test only encoder-decoder models if not config.is_encoder_decoder: continue head_masking = { "head_mask": tf.zeros((config.encoder_layers, config.encoder_attention_heads)), "decoder_head_mask": tf.zeros((config.decoder_layers, config.decoder_attention_heads)), "cross_attn_head_mask": tf.zeros((config.decoder_layers, config.decoder_attention_heads)), } signature = inspect.signature(model.call) # skip models whose `call()` does not accept all of the head mask kwargs if not set(head_masking.keys()) < {*signature.parameters.keys()}: continue for attn_name, (name, mask) in zip(attention_names, head_masking.items()): out = model.generate( inputs_dict["input_ids"], num_beams=1, max_length=inputs_dict["input_ids"].shape[-1] + 5, output_attentions=True, return_dict_in_generate=True, **{name: mask}, ) # We check the state of decoder_attentions and cross_attentions just from the last step
attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([tf.reduce_sum(w).numpy() for w in attn_weights]), 0.0) def test_load_with_mismatched_shapes(self): if not self.test_mismatched_shapes: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class not in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): continue with self.subTest(msg=f"Testing {model_class}"): with tempfile.TemporaryDirectory() as tmp_dir: model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class) _ = model(**inputs) model.save_pretrained(tmp_dir) # Fails when we don't set ignore_mismatched_sizes=True with self.assertRaises(ValueError): new_model = TFAutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42) with self.assertRaises(ValueError): new_model_without_prefix = TFAutoModel.from_pretrained(tmp_dir, vocab_size=10) logger = logging.get_logger("transformers.modeling_tf_utils") with CaptureLogger(logger) as cl: new_model = TFAutoModelForSequenceClassification.from_pretrained( tmp_dir, num_labels=42, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) logits = new_model(**inputs).logits self.assertEqual(logits.shape[1], 42) with CaptureLogger(logger) as cl: new_model_without_prefix = TFAutoModel.from_pretrained( tmp_dir, vocab_size=10, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) # Although Tf models always have a prefix pointing to `MainLayer`, # we still add this "without prefix" test to keep a consistency between tf and pt tests. input_ids = ids_tensor((2, 8), 10) if self.is_encoder_decoder: new_model_without_prefix(input_ids, decoder_input_ids=input_ids) else: new_model_without_prefix(input_ids) def test_model_main_input_name(self): for model_class in self.all_model_classes: model_signature = inspect.signature(getattr(model_class, "call")) # The main input is the name of the argument after `self` observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(model_class.main_input_name, observed_main_input_name) def test_dataset_conversion(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class, return_labels=False) if "labels" in tf_inputs_dict: return # This is some kinda funky decoder model that needs labels in its forward pass tf_inputs_dict = { key: val for key, val in tf_inputs_dict.items() if "head_mask" not in key and isinstance(val, tf.Tensor) } tf_inputs_dict["extra_unwanted_column"] = list(tf_inputs_dict.values())[0] # Use a random other tensor input_dataset = Dataset.from_dict(tf_inputs_dict) tf_dataset = model.prepare_tf_dataset( input_dataset, batch_size=len(input_dataset), drop_remainder=False, shuffle=False ) test_batch = next(iter(tf_dataset)) if isinstance(test_batch, tf.Tensor): self.assertEqual(len(test_batch), len(input_dataset)) # Assert we didn't lose any data elif isinstance(test_batch, dict): # Assert we discarded the unwanted extra column but kept everything else self.assertEqual(len(test_batch), len(input_dataset.features) - 1) self.assertNotIn("extra_unwanted_column", test_batch) for tensor in test_batch.values(): self.assertTrue(isinstance(tensor, tf.Tensor)) self.assertEqual(len(tensor), len(input_dataset)) # Assert we didn't lose any data 
model(test_batch, training=False) if "labels" in inspect.signature(model_class.call).parameters.keys(): tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class, return_labels=True) if "labels" not in tf_inputs_dict: return # This model isn't giving us labels after all, don't try training with it tf_inputs_dict = { key: val for key, val in tf_inputs_dict.items() if "head_mask" not in key and isinstance(val, tf.Tensor) } tf_inputs_dict["extra_unwanted_column"] = list(tf_inputs_dict.values())[0] # Use a random other tensor input_dataset = Dataset.from_dict(tf_inputs_dict) tf_dataset = model.prepare_tf_dataset( input_dataset, batch_size=len(input_dataset), drop_remainder=False, shuffle=False ) test_batch, test_batch_labels = next(iter(tf_dataset)) self.assertGreater(len(test_batch_labels), 0) # Assert the labels are present feature_columns = 1 if isinstance(test_batch, tf.Tensor) else len(test_batch) label_columns = 1 if isinstance(test_batch_labels, tf.Tensor) else len(test_batch_labels) # Assert we discarded the unwanted extra column but kept everything else self.assertEqual(feature_columns + label_columns, len(input_dataset.features) - 1) if isinstance(test_batch, dict): self.assertNotIn("extra_unwanted_column", test_batch) if isinstance(test_batch_labels, dict): self.assertNotIn("extra_unwanted_column", test_batch_labels) model.compile(optimizer="sgd", run_eagerly=True) model.train_on_batch(test_batch, test_batch_labels) def _test_xla_generate(self, **generate_kwargs): def _generate_and_check_results(model, inputs_dict): if "input_ids" in inputs_dict: inputs = inputs_dict["input_ids"] # make sure there are no pad tokens in prompt, which may trigger unwanted behavior if model.generation_config.pad_token_id is not None: if config.pad_token_id == 0: new_pad_token = model.generation_config.pad_token_id + 1 else: new_pad_token = model.generation_config.pad_token_id - 1 else: new_pad_token = None inputs = tf.where(inputs != model.generation_config.pad_token_id, inputs, new_pad_token) elif "input_features" in inputs_dict: inputs = inputs_dict["input_features"] else: raise ValueError("No valid generate input found in inputs_dict") generated = model.generate(inputs, **generate_kwargs).numpy() generate_xla = tf.function(model.generate, jit_compile=True) generated_xla = generate_xla(inputs, **generate_kwargs).numpy() # Due to numerical instability, fail the test only if more than 10% of the input sequences give different # outputs between the XLA and non-XLA versions. If there are fewer than 10 examples, be strict and do not # allow any difference.
diff = [[], []] for _generated, _generated_xla in zip(generated.tolist(), generated_xla.tolist()): if _generated != _generated_xla: diff[0].append(_generated) diff[1].append(_generated_xla) ratio = len(diff[0]) / len(generated) if ratio > 0.1 or (len(diff[0]) > 0 and len(generated) < 10): self.assertListEqual(diff[0], diff[1]) for model_class in self.all_generative_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.eos_token_id = None # Generate until max length config.do_sample = False # fix config for models with additional sequence-length limiting settings for var_name in ["max_position_embeddings", "max_target_positions"]: attr = getattr(config, var_name, None) if attr is not None and attr < generate_kwargs["max_new_tokens"]: try: setattr(config, var_name, generate_kwargs["max_new_tokens"]) except NotImplementedError: # xlnet will raise an exception when trying to set # max_position_embeddings. pass model = model_class(config) if model.supports_xla_generation: _generate_and_check_results(model, inputs_dict) else: with self.assertRaises(ValueError): _generate_and_check_results(model, inputs_dict) def test_xla_generate_fast(self): """ Basic quick test for generate-compatible classes that confirms that XLA-generated tokens are the same as their non XLA counterparts. Either the model supports XLA generation and passes the inner test, or it raises an appropriate exception """ self._test_xla_generate(num_beams=1, num_return_sequences=1, max_new_tokens=3) @slow def test_xla_generate_contrastive(self): """ Slow and challenging version of `test_xla_generate_fast` for contrastive search -- contrastive search directly manipulates the model cache and other outputs, and this test ensures that they are in a valid format that is also supported by XLA. Either the model supports XLA generation and passes the inner test, or it raises an appropriate exception """ self._test_xla_generate(num_beams=1, num_return_sequences=1, max_new_tokens=16, penalty_alpha=0.5, top_k=4) @slow def test_xla_generate_slow(self): """ Slow and challenging version of `test_xla_generate_fast` -- this test asks for several long sequences using beam search, with and without XLA. The two outputs should match, and a failure in this test indicates that the model may need further analysis if it is to be used for XLA generation. 
Either the model supports XLA generation and passes the inner test, or it raises an appropriate exception """ self._test_xla_generate(num_beams=8, num_return_sequences=2, max_new_tokens=128) def _generate_random_bad_tokens(self, num_bad_tokens, model): # special tokens cannot be bad tokens special_tokens = [] if model.config.bos_token_id is not None: special_tokens.append(model.config.bos_token_id) if model.config.pad_token_id is not None: special_tokens.append(model.config.pad_token_id) if model.config.eos_token_id is not None: special_tokens.append(model.config.eos_token_id) # create random bad tokens that are not special tokens bad_tokens = [] while len(bad_tokens) < num_bad_tokens: token = tf.squeeze(ids_tensor((1, 1), self.model_tester.vocab_size), 0).numpy()[0] if token not in special_tokens: bad_tokens.append(token) return bad_tokens def _check_generated_ids(self, output_ids): for token_id in output_ids[0].numpy().tolist(): self.assertGreaterEqual(token_id, 0) self.assertLess(token_id, self.model_tester.vocab_size) def _check_match_tokens(self, generated_ids, bad_words_ids): # for all bad word tokens for bad_word_ids in bad_words_ids: # for all slices in batch for generated_ids_slice in generated_ids: # for all word idx for i in range(len(bad_word_ids), len(generated_ids_slice)): # if tokens match if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids: return True return False def ids_tensor(shape, vocab_size, rng=None, name=None, dtype=None): """Creates a random int32 tensor of the shape within the vocab size.""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) output = tf.constant(values, shape=shape, dtype=dtype if dtype is not None else tf.int32) return output def random_attention_mask(shape, rng=None, name=None, dtype=None): attn_mask = ids_tensor(shape, vocab_size=2, rng=rng, name=name, dtype=dtype) # forward the caller's rng/name instead of discarding them # Mark the first token as 1 (matches behaviour of PyTorch/Flax function) attn_mask = tf.concat([tf.ones_like(attn_mask[:, :1]), attn_mask[:, 1:]], axis=1) return attn_mask def floats_tensor(shape, scale=1.0, rng=None, name=None, dtype=None): """Creates a random float32 tensor""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.random() * scale) return tf.reshape(tf.constant(values, dtype=dtype if dtype is not None else tf.float32), shape=shape)
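# ---------------------------------------------------------------------------
# Illustrative usage of the three factory helpers above (a hypothetical
# sketch; the shapes and vocab size are arbitrary values chosen for the
# example, not taken from any real test):
#
#     input_ids = ids_tensor((2, 7), vocab_size=99)              # int32 ids in [0, 99)
#     attention_mask = random_attention_mask((2, 7))             # 0/1 mask, first column forced to 1
#     pixel_values = floats_tensor((2, 3, 30, 30), scale=1.0)    # float32 values in [0, 1)
# ---------------------------------------------------------------------------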
transformers/tests/test_modeling_tf_common.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import warnings import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, filter_out_non_signature_kwargs, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class GenericTester(unittest.TestCase): def test_flatten_dict(self): input_dict = { "task_specific_params": { "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4}, "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4}, "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6}, } } expected_dict = { "task_specific_params.summarization.length_penalty": 1.0, "task_specific_params.summarization.max_length": 128, "task_specific_params.summarization.min_length": 12, "task_specific_params.summarization.num_beams": 4, "task_specific_params.summarization_cnn.length_penalty": 2.0, "task_specific_params.summarization_cnn.max_length": 142, "task_specific_params.summarization_cnn.min_length": 56, "task_specific_params.summarization_cnn.num_beams": 4, "task_specific_params.summarization_xsum.length_penalty": 1.0, "task_specific_params.summarization_xsum.max_length": 62, "task_specific_params.summarization_xsum.min_length": 11, "task_specific_params.summarization_xsum.num_beams": 6, } self.assertEqual(flatten_dict(input_dict), expected_dict) def test_transpose_numpy(self): x = np.random.randn(3, 4) self.assertTrue(np.allclose(transpose(x), x.transpose())) x = np.random.randn(3, 4, 5) self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0)))) @require_torch def test_transpose_torch(self): x = np.random.randn(3, 4) t = torch.tensor(x) self.assertTrue(np.allclose(transpose(x), transpose(t).numpy())) x = np.random.randn(3, 4, 5) t = torch.tensor(x) self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy())) @require_tf def test_transpose_tf(self): x = np.random.randn(3, 4) t = tf.constant(x) self.assertTrue(np.allclose(transpose(x), transpose(t).numpy())) x = np.random.randn(3, 4, 5) t = tf.constant(x) self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy())) @require_flax def test_transpose_flax(self): x = np.random.randn(3, 4) t = jnp.array(x) self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t)))) x = np.random.randn(3, 4, 5) t = jnp.array(x) self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0))))) def test_reshape_numpy(self): x = np.random.randn(3, 4) self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3)))) x = np.random.randn(3, 4, 5) self.assertTrue(np.allclose(reshape(x, (12, 
5)), np.reshape(x, (12, 5)))) @require_torch def test_reshape_torch(self): x = np.random.randn(3, 4) t = torch.tensor(x) self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy())) x = np.random.randn(3, 4, 5) t = torch.tensor(x) self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy())) @require_tf def test_reshape_tf(self): x = np.random.randn(3, 4) t = tf.constant(x) self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy())) x = np.random.randn(3, 4, 5) t = tf.constant(x) self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy())) @require_flax def test_reshape_flax(self): x = np.random.randn(3, 4) t = jnp.array(x) self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3))))) x = np.random.randn(3, 4, 5) t = jnp.array(x) self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5))))) def test_squeeze_numpy(self): x = np.random.randn(1, 3, 4) self.assertTrue(np.allclose(squeeze(x), np.squeeze(x))) x = np.random.randn(1, 4, 1, 5) self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2))) @require_torch def test_squeeze_torch(self): x = np.random.randn(1, 3, 4) t = torch.tensor(x) self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy())) x = np.random.randn(1, 4, 1, 5) t = torch.tensor(x) self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy())) @require_tf def test_squeeze_tf(self): x = np.random.randn(1, 3, 4) t = tf.constant(x) self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy())) x = np.random.randn(1, 4, 1, 5) t = tf.constant(x) self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy())) @require_flax def test_squeeze_flax(self): x = np.random.randn(1, 3, 4) t = jnp.array(x) self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t)))) x = np.random.randn(1, 4, 1, 5) t = jnp.array(x) self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2)))) def test_expand_dims_numpy(self): x = np.random.randn(3, 4) self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1))) @require_torch def test_expand_dims_torch(self): x = np.random.randn(3, 4) t = torch.tensor(x) self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy())) @require_tf def test_expand_dims_tf(self): x = np.random.randn(3, 4) t = tf.constant(x) self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy())) @require_flax def test_expand_dims_flax(self): x = np.random.randn(3, 4) t = jnp.array(x) self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1)))) class ValidationDecoratorTester(unittest.TestCase): def test_cases_no_warning(self): with warnings.catch_warnings(record=True) as raised_warnings: warnings.simplefilter("always") # basic test @filter_out_non_signature_kwargs() def func1(a): return a result = func1(1) self.assertEqual(result, 1) # include extra kwarg @filter_out_non_signature_kwargs(extra=["extra_arg"]) def func2(a, **kwargs): return a, kwargs a, kwargs = func2(1) self.assertEqual(a, 1) self.assertEqual(kwargs, {}) a, kwargs = func2(1, extra_arg=2) self.assertEqual(a, 1) self.assertEqual(kwargs, {"extra_arg": 2}) # multiple extra kwargs @filter_out_non_signature_kwargs(extra=["extra_arg", "extra_arg2"]) def func3(a, **kwargs): return a, kwargs a, kwargs = func3(2) self.assertEqual(a, 2) self.assertEqual(kwargs, {}) a, kwargs = func3(3, extra_arg2=3) self.assertEqual(a, 3) self.assertEqual(kwargs, {"extra_arg2": 3}) a, kwargs = 
func3(1, extra_arg=2, extra_arg2=3) self.assertEqual(a, 1) self.assertEqual(kwargs, {"extra_arg": 2, "extra_arg2": 3}) # Check that no warnings were raised self.assertEqual(len(raised_warnings), 0, f"Warning raised: {[w.message for w in raised_warnings]}") def test_cases_with_warnings(self): @filter_out_non_signature_kwargs() def func1(a): return a with self.assertWarns(UserWarning): func1(1, extra_arg=2) @filter_out_non_signature_kwargs(extra=["extra_arg"]) def func2(a, **kwargs): return kwargs with self.assertWarns(UserWarning): kwargs = func2(1, extra_arg=2, extra_arg2=3) self.assertEqual(kwargs, {"extra_arg": 2}) @filter_out_non_signature_kwargs(extra=["extra_arg", "extra_arg2"]) def func3(a, **kwargs): return kwargs with self.assertWarns(UserWarning): kwargs = func3(1, extra_arg=2, extra_arg2=3, extra_arg3=4) self.assertEqual(kwargs, {"extra_arg": 2, "extra_arg2": 3})
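# ---------------------------------------------------------------------------
# Sketch of the dispatch pattern the framework-agnostic ops tested above
# (`transpose`, `reshape`, `squeeze`, `expand_dims`) follow. This is a
# simplified assumption for illustration only -- the real implementations
# live in `src/transformers/utils/generic.py` and also route TF and Flax
# tensors to their native ops:
#
#     import numpy as np
#
#     def transpose_sketch(array, axes=None):
#         # NumPy arrays are handled natively; a framework tensor would be
#         # detected (e.g. via `tf.is_tensor`) and forwarded to the matching
#         # framework op, so every backend shares a single call site.
#         if isinstance(array, np.ndarray):
#             return np.transpose(array, axes=axes)
#         raise ValueError(f"Type not supported for transpose: {type(array)}.")
# ---------------------------------------------------------------------------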
transformers/tests/utils/test_generic.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib.metadata import sys from transformers.testing_utils import TestCasePlus from transformers.utils.versions import require_version, require_version_core numpy_ver = importlib.metadata.version("numpy") python_ver = ".".join([str(x) for x in sys.version_info[:3]]) class DependencyVersionCheckTest(TestCasePlus): def test_core(self): # lt + different version strings require_version_core("numpy<1000.4.5") require_version_core("numpy<1000.4") require_version_core("numpy<1000") # le require_version_core("numpy<=1000.4.5") require_version_core(f"numpy<={numpy_ver}") # eq require_version_core(f"numpy=={numpy_ver}") # ne require_version_core("numpy!=1000.4.5") # ge require_version_core("numpy>=1.0") require_version_core("numpy>=1.0.0") require_version_core(f"numpy>={numpy_ver}") # gt require_version_core("numpy>1.0.0") # mix require_version_core("numpy>1.0.0,<1000") # requirement w/o version require_version_core("numpy") # unmet requirements due to version conflict for req in ["numpy==1.0.0", "numpy>=1000.0.0", f"numpy<{numpy_ver}"]: try: require_version_core(req) except ImportError as e: self.assertIn(f"{req} is required", str(e)) self.assertIn("but found", str(e)) # unmet requirements due to missing module for req in ["numpipypie>1", "numpipypie2"]: try: require_version_core(req) except importlib.metadata.PackageNotFoundError as e: self.assertIn(f"The '{req}' distribution was not found and is required by this application", str(e)) self.assertIn("Try: `pip install transformers -U`", str(e)) # bogus requirements formats: # 1. whole thing for req in ["numpy??1.0.0", "numpy1.0.0"]: try: require_version_core(req) except ValueError as e: self.assertIn("requirement needs to be in the pip package format", str(e)) # 2. only operators for req in ["numpy=1.0.0", "numpy == 1.00", "numpy<>1.0.0", "numpy><1.00", "numpy>>1.0.0"]: try: require_version_core(req) except ValueError as e: self.assertIn("need one of ", str(e)) def test_python(self): # matching requirement require_version("python>=3.6.0") # not matching requirements for req in ["python>9.9.9", "python<3.0.0"]: try: require_version_core(req) except ImportError as e: self.assertIn(f"{req} is required", str(e)) self.assertIn(f"but found python=={python_ver}", str(e))
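# ---------------------------------------------------------------------------
# Typical call sites for the helpers exercised above (illustration only; the
# requirement strings are made up for the example):
#
#     from transformers.utils.versions import require_version, require_version_core
#
#     require_version("numpy>=1.17", "To fix: pip install -U numpy")  # custom hint on failure
#     require_version_core("numpy>=1.17")  # failure hint suggests `pip install transformers -U`
# ---------------------------------------------------------------------------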
transformers/tests/utils/test_versions_utils.py
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utility that checks the big table in the file docs/source/en/index.md and potentially updates it. Use from the root of the repo with: ```bash python utils/check_table.py ``` for a check that will error in case of inconsistencies (used by `make repo-consistency`). To auto-fix issues run: ```bash python utils/check_table.py --fix_and_overwrite ``` which is used by `make fix-copies`. """ import argparse import collections import os import re from typing import List from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py TRANSFORMERS_PATH = "src/transformers" PATH_TO_DOCS = "docs/source/en" REPO_PATH = "." def _find_text_in_file(filename: str, start_prompt: str, end_prompt: str): """ Find the text in filename between two prompts. Args: filename (`str`): The file to search in. start_prompt (`str`): A string to look for at the start of the content searched. end_prompt (`str`): A string that will mark the end of the content to look for. Returns: `Tuple[str, int, int, List[str]]`: The content between the prompts, its start and end line indices, and the full list of lines of the file. """ with open(filename, "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() # Find the start prompt. start_index = 0 while not lines[start_index].startswith(start_prompt): start_index += 1 start_index += 1 # Now go until the end prompt. end_index = start_index while not lines[end_index].startswith(end_prompt): end_index += 1 end_index -= 1 while len(lines[start_index]) <= 1: start_index += 1 while len(lines[end_index]) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index]), start_index, end_index, lines # Regexes that match TF/Flax/PT model names. Add here suffixes that are used to identify models, separated by | _re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") _re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch after the two previous regexes. _re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # This is to make sure the transformers module imported is the one in the repo. transformers_module = direct_transformers_import(TRANSFORMERS_PATH) def camel_case_split(identifier: str) -> List[str]: """ Split a camel-cased name into words. Args: identifier (`str`): The camel-cased name to parse. Returns: `List[str]`: The list of words in the identifier (as separated by capital letters).
Example: ```py >>> camel_case_split("CamelCasedClass") ["Camel", "Cased", "Class"] ``` """ # Regex thanks to https://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier) return [m.group(0) for m in matches] def _center_text(text: str, width: int) -> str: """ Utility that will add spaces on the left and right of a text to make it centered for a given width. Args: text (`str`): The text to center. width (`int`): The desired length of the result. Returns: `str`: A text of length `width` with the original `text` in the middle. """ text_length = 2 if text == "✅" or text == "❌" else len(text) left_indent = (width - text_length) // 2 right_indent = width - text_length - left_indent return " " * left_indent + text + " " * right_indent SPECIAL_MODEL_NAME_LINK_MAPPING = { "Data2VecAudio": "[Data2VecAudio](model_doc/data2vec)", "Data2VecText": "[Data2VecText](model_doc/data2vec)", "Data2VecVision": "[Data2VecVision](model_doc/data2vec)", "DonutSwin": "[DonutSwin](model_doc/donut)", } MODEL_NAMES_WITH_SAME_CONFIG = { "BARThez": "BART", "BARTpho": "BART", "BertJapanese": "BERT", "BERTweet": "BERT", "BORT": "BERT", "ByT5": "T5", "CPM": "OpenAI GPT-2", "DePlot": "Pix2Struct", "DialoGPT": "OpenAI GPT-2", "DiT": "BEiT", "FLAN-T5": "T5", "FLAN-UL2": "T5", "HerBERT": "BERT", "LayoutXLM": "LayoutLMv2", "Llama2": "LLaMA", "Llama3": "LLaMA", "MADLAD-400": "T5", "MatCha": "Pix2Struct", "mBART-50": "mBART", "Megatron-GPT2": "OpenAI GPT-2", "mLUKE": "LUKE", "MMS": "Wav2Vec2", "NLLB": "M2M100", "PhoBERT": "BERT", "T5v1.1": "T5", "TAPEX": "BART", "UL2": "T5", "Wav2Vec2Phoneme": "Wav2Vec2", "XLM-V": "XLM-RoBERTa", "XLS-R": "Wav2Vec2", "XLSR-Wav2Vec2": "Wav2Vec2", } MODEL_NAMES_TO_IGNORE = ["CLIPVisionModel", "SiglipVisionModel", "ChineseCLIPVisionModel", "Qwen2AudioEncoder"] def get_model_table_from_auto_modules() -> str: """ Generates an up-to-date model table from the content of the auto modules. """ # Dictionary model names to config. config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES model_name_to_config = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. pt_models = collections.defaultdict(bool) tf_models = collections.defaultdict(bool) flax_models = collections.defaultdict(bool) # Let's lookup through all transformers object (once). for attr_name in dir(transformers_module): lookup_dict = None if _re_tf_models.match(attr_name) is not None: lookup_dict = tf_models attr_name = _re_tf_models.match(attr_name).groups()[0] elif _re_flax_models.match(attr_name) is not None: lookup_dict = flax_models attr_name = _re_flax_models.match(attr_name).groups()[0] elif _re_pt_models.match(attr_name) is not None: lookup_dict = pt_models attr_name = _re_pt_models.match(attr_name).groups()[0] if lookup_dict is not None: while len(attr_name) > 0: if attr_name in model_name_to_prefix.values(): lookup_dict[attr_name] = True break # Try again after removing the last word in the name attr_name = "".join(camel_case_split(attr_name)[:-1]) # Let's build that table! 
model_names = list(model_name_to_config.keys()) + list(MODEL_NAMES_WITH_SAME_CONFIG.keys()) # model name to doc link mapping model_names_mapping = transformers_module.models.auto.configuration_auto.MODEL_NAMES_MAPPING model_name_to_link_mapping = {value: f"[{value}](model_doc/{key})" for key, value in model_names_mapping.items()} # update mapping with special model names model_name_to_link_mapping = { k: SPECIAL_MODEL_NAME_LINK_MAPPING[k] if k in SPECIAL_MODEL_NAME_LINK_MAPPING else v for k, v in model_name_to_link_mapping.items() } # MaskFormerSwin and TimmBackbone are backbones and so not meant to be loaded and used on their own. Instead, they define architectures which can be loaded using the AutoBackbone API. names_to_exclude = ["MaskFormerSwin", "TimmBackbone", "Speech2Text2"] model_names = [name for name in model_names if name not in names_to_exclude] model_names.sort(key=str.lower) columns = ["Model", "PyTorch support", "TensorFlow support", "Flax Support"] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). widths = [len(c) + 2 for c in columns] widths[0] = max([len(doc_link) for doc_link in model_name_to_link_mapping.values()]) + 2 # Build the table per se table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n" # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n" check = {True: "✅", False: "❌"} for name in model_names: if name in MODEL_NAMES_TO_IGNORE: continue if name in MODEL_NAMES_WITH_SAME_CONFIG.keys(): prefix = model_name_to_prefix[MODEL_NAMES_WITH_SAME_CONFIG[name]] else: prefix = model_name_to_prefix[name] line = [ model_name_to_link_mapping[name], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n" return table def check_model_table(overwrite=False): """ Check the model table in the index.md is consistent with the state of the lib and potentially fix it. Args: overwrite (`bool`, *optional*, defaults to `False`): Whether or not to overwrite the table when it's not up to date. """ current_table, start_index, end_index, lines = _find_text_in_file( filename=os.path.join(PATH_TO_DOCS, "index.md"), start_prompt="<!--This table is updated automatically from the auto modules", end_prompt="<!-- End table-->", ) new_table = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:]) else: raise ValueError( "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") args = parser.parse_args() check_model_table(args.fix_and_overwrite)
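# ---------------------------------------------------------------------------
# Shape of the markdown emitted by `get_model_table_from_auto_modules()`
# (the row values below are hypothetical, shown only to illustrate the
# center-aligned format the functions above construct):
#
#     |            Model             | PyTorch support | TensorFlow support | Flax Support |
#     |:----------------------------:|:---------------:|:------------------:|:------------:|
#     |  [ALBERT](model_doc/albert)  |        ✅        |         ✅          |      ✅       |
# ---------------------------------------------------------------------------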
transformers/utils/check_table.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ast import collections import datetime import functools import json import operator import os import re import sys import time from typing import Dict, List, Optional, Union import requests from get_ci_error_statistics import get_jobs from get_previous_daily_ci import get_last_daily_ci_reports from huggingface_hub import HfApi from slack_sdk import WebClient api = HfApi() client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"]) NON_MODEL_TEST_MODULES = [ "benchmark", "deepspeed", "extended", "fixtures", "generation", "onnx", "optimization", "pipelines", "sagemaker", "trainer", "utils", ] def handle_test_results(test_results): expressions = test_results.split(" ") failed = 0 success = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1] for i, expression in enumerate(expressions): if "failed" in expression: failed += int(expressions[i - 1]) if "passed" in expression: success += int(expressions[i - 1]) return failed, success, time_spent def handle_stacktraces(test_results): # These files should follow the following architecture: # === FAILURES === # <path>:<line>: Error ... # <path>:<line>: Error ... # <empty line> total_stacktraces = test_results.split("\n")[1:-1] stacktraces = [] for stacktrace in total_stacktraces: try: line = stacktrace[: stacktrace.index(" ")].split(":")[-2] error_message = stacktrace[stacktrace.index(" ") :] stacktraces.append(f"(line {line}) {error_message}") except Exception: stacktraces.append("Cannot retrieve error message.") return stacktraces def dicts_to_sum(objects: Union[Dict[str, Dict], List[dict]]): if isinstance(objects, dict): lists = objects.values() else: lists = objects # Convert each dictionary to counter counters = map(collections.Counter, lists) # Sum all the counters return functools.reduce(operator.add, counters) class Message: def __init__( self, title: str, ci_title: str, model_results: Dict, additional_results: Dict, selected_warnings: List = None, prev_ci_artifacts=None, ): self.title = title self.ci_title = ci_title # Failures and success of the modeling tests self.n_model_success = sum(r["success"] for r in model_results.values()) self.n_model_single_gpu_failures = sum(dicts_to_sum(r["failed"])["single"] for r in model_results.values()) self.n_model_multi_gpu_failures = sum(dicts_to_sum(r["failed"])["multi"] for r in model_results.values()) # Some suites do not have a distinction between single and multi GPU. 
self.n_model_unknown_failures = sum(dicts_to_sum(r["failed"])["unclassified"] for r in model_results.values()) self.n_model_failures = ( self.n_model_single_gpu_failures + self.n_model_multi_gpu_failures + self.n_model_unknown_failures ) # Failures and success of the additional tests self.n_additional_success = sum(r["success"] for r in additional_results.values()) if len(additional_results) > 0: # `dicts_to_sum` requires a non-empty collection, so only aggregate when there are additional results (zeros are used below otherwise). all_additional_failures = dicts_to_sum([r["failed"] for r in additional_results.values()]) self.n_additional_single_gpu_failures = all_additional_failures["single"] self.n_additional_multi_gpu_failures = all_additional_failures["multi"] self.n_additional_unknown_gpu_failures = all_additional_failures["unclassified"] else: self.n_additional_single_gpu_failures = 0 self.n_additional_multi_gpu_failures = 0 self.n_additional_unknown_gpu_failures = 0 self.n_additional_failures = ( self.n_additional_single_gpu_failures + self.n_additional_multi_gpu_failures + self.n_additional_unknown_gpu_failures ) # Results self.n_failures = self.n_model_failures + self.n_additional_failures self.n_success = self.n_model_success + self.n_additional_success self.n_tests = self.n_failures + self.n_success self.model_results = model_results self.additional_results = additional_results self.thread_ts = None if selected_warnings is None: selected_warnings = [] self.selected_warnings = selected_warnings self.prev_ci_artifacts = prev_ci_artifacts @property def time(self) -> str: all_results = [*self.model_results.values(), *self.additional_results.values()] time_spent = [r["time_spent"].split(", ")[0] for r in all_results if len(r["time_spent"])] total_secs = 0 for time in time_spent: time_parts = time.split(":") # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(time_parts) == 1: time_parts = [0, 0, time_parts[0]] hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2]) total_secs += hours * 3600 + minutes * 60 + seconds hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60 return f"{int(hours)}h{int(minutes)}m{int(seconds)}s" @property def header(self) -> Dict: return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def ci_title_section(self) -> Dict: return {"type": "section", "text": {"type": "mrkdwn", "text": self.ci_title}} @property def no_failures(self) -> Dict: return { "type": "section", "text": { "type": "plain_text", "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.", "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def failures(self) -> Dict: return { "type": "section", "text": { "type": "plain_text", "text": ( f"There were {self.n_failures} failures, out of {self.n_tests} tests.\n" f"Number of model failures: {self.n_model_failures}.\n" f"The suite ran in {self.time}." ), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def warnings(self) -> Dict: # If something goes wrong, let's avoid the CI report failing to be sent. button_text = "Check warnings (Link not found)" # Use the workflow run link job_link = f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}" for job in github_actions_jobs: if "Extract warnings in CI artifacts" in job["name"] and job["conclusion"] == "success": button_text = "Check warnings" # Use the actual job link job_link = job["html_url"] break huggingface_hub_warnings = [x for x in self.selected_warnings if "huggingface_hub" in x] text = f"There are {len(self.selected_warnings)} warnings being selected." text += f"\n{len(huggingface_hub_warnings)} of them are from `huggingface_hub`." return { "type": "section", "text": { "type": "plain_text", "text": text, "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": button_text, "emoji": True}, "url": job_link, }, } @staticmethod def get_device_report(report, rjust=6): if "single" in report and "multi" in report: return f"{str(report['single']).rjust(rjust)} | {str(report['multi']).rjust(rjust)} | " elif "single" in report: return f"{str(report['single']).rjust(rjust)} | {'0'.rjust(rjust)} | " elif "multi" in report: return f"{'0'.rjust(rjust)} | {str(report['multi']).rjust(rjust)} | " @property def category_failures(self) -> Dict: model_failures = [v["failed"] for v in self.model_results.values()] category_failures = {} for model_failure in model_failures: for key, value in model_failure.items(): if key not in category_failures: category_failures[key] = dict(value) else: category_failures[key]["unclassified"] += value["unclassified"] category_failures[key]["single"] += value["single"] category_failures[key]["multi"] += value["multi"] individual_reports = [] for key, value in category_failures.items(): device_report = self.get_device_report(value) if sum(value.values()): if device_report: individual_reports.append(f"{device_report}{key}") else: individual_reports.append(key) header = "Single | Multi | Category\n" category_failures_report = prepare_reports( title="The following modeling categories had failures", header=header, reports=individual_reports ) return {"type": "section", "text": {"type": "mrkdwn", "text": category_failures_report}} def compute_diff_for_failure_reports(self, curr_failure_report, prev_failure_report): # noqa # Remove the leading and trailing parts that don't contain failure count information.
        model_failures = curr_failure_report.split("\n")[3:-2]
        prev_model_failures = prev_failure_report.split("\n")[3:-2]
        entries_changed = set(model_failures).difference(prev_model_failures)

        prev_map = {}
        for f in prev_model_failures:
            items = [x.strip() for x in f.split("| ")]
            prev_map[items[-1]] = [int(x) for x in items[:-1]]

        curr_map = {}
        for f in entries_changed:
            items = [x.strip() for x in f.split("| ")]
            curr_map[items[-1]] = [int(x) for x in items[:-1]]

        diff_map = {}
        for k, v in curr_map.items():
            if k not in prev_map:
                diff_map[k] = v
            else:
                diff = [x - y for x, y in zip(v, prev_map[k])]
                if max(diff) > 0:
                    diff_map[k] = diff

        entries_changed = []
        for model_name, diff_values in diff_map.items():
            diff = [str(x) for x in diff_values]
            diff = [f"+{x}" if (x != "0" and not x.startswith("-")) else x for x in diff]
            diff = [x.rjust(9) for x in diff]
            device_report = " | ".join(diff) + " | "
            report = f"{device_report}{model_name}"
            entries_changed.append(report)
        entries_changed = sorted(entries_changed, key=lambda s: s.split("| ")[-1])

        return entries_changed

    @property
    def model_failures(self) -> List[Dict]:
        # Obtain per-model failures
        def per_model_sum(model_category_dict):
            return dicts_to_sum(model_category_dict["failed"].values())

        failures = {}
        non_model_failures = {
            k: per_model_sum(v) for k, v in self.model_results.items() if sum(per_model_sum(v).values())
        }

        for k, v in self.model_results.items():
            if k in NON_MODEL_TEST_MODULES:
                # Non-model modules are reported separately (see `other_module_reports` below).
                continue

            if sum(per_model_sum(v).values()):
                dict_failed = dict(v["failed"])
                pytorch_specific_failures = dict_failed.pop("PyTorch")
                tensorflow_specific_failures = dict_failed.pop("TensorFlow")
                other_failures = dicts_to_sum(dict_failed.values())

                failures[k] = {
                    "PyTorch": pytorch_specific_failures,
                    "TensorFlow": tensorflow_specific_failures,
                    "other": other_failures,
                }

        model_reports = []
        other_module_reports = []
        for key, value in non_model_failures.items():
            if key in NON_MODEL_TEST_MODULES:
                device_report = self.get_device_report(value)

                if sum(value.values()):
                    if device_report:
                        report = f"{device_report}{key}"
                    else:
                        report = key

                    other_module_reports.append(report)

        for key, value in failures.items():
            device_report_values = [
                value["PyTorch"]["single"],
                value["PyTorch"]["multi"],
                value["TensorFlow"]["single"],
                value["TensorFlow"]["multi"],
                sum(value["other"].values()),
            ]

            if sum(device_report_values):
                device_report = " | ".join([str(x).rjust(9) for x in device_report_values]) + " | "
                report = f"{device_report}{key}"

                model_reports.append(report)

        # (Possibly truncated) reports for the current workflow run - to be sent to Slack channels
        model_header = "Single PT | Multi PT | Single TF | Multi TF | Other | Category\n"
        sorted_model_reports = sorted(model_reports, key=lambda s: s.split("| ")[-1])
        model_failures_report = prepare_reports(
            title="The following model modules had failures", header=model_header, reports=sorted_model_reports
        )

        module_header = "Single | Multi | Category\n"
        sorted_module_reports = sorted(other_module_reports, key=lambda s: s.split("| ")[-1])
        module_failures_report = prepare_reports(
            title="The following non-model modules had failures", header=module_header, reports=sorted_module_reports
        )

        # To be sent to Slack channels
        model_failure_sections = [
            {"type": "section", "text": {"type": "mrkdwn", "text": model_failures_report}},
            {"type": "section", "text": {"type": "mrkdwn", "text": module_failures_report}},
        ]

        # Save the complete (i.e.
no truncation) failure tables (of the current workflow run)
        # (to be uploaded as artifacts)
        model_failures_report = prepare_reports(
            title="The following model modules had failures",
            header=model_header,
            reports=sorted_model_reports,
            to_truncate=False,
        )
        file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/model_failures_report.txt")
        with open(file_path, "w", encoding="UTF-8") as fp:
            fp.write(model_failures_report)

        module_failures_report = prepare_reports(
            title="The following non-model modules had failures",
            header=module_header,
            reports=sorted_module_reports,
            to_truncate=False,
        )
        file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/module_failures_report.txt")
        with open(file_path, "w", encoding="UTF-8") as fp:
            fp.write(module_failures_report)

        if self.prev_ci_artifacts is not None:
            # if the last run produced an artifact named `ci_results_{job_name}`
            if (
                f"ci_results_{job_name}" in self.prev_ci_artifacts
                and "model_failures_report.txt" in self.prev_ci_artifacts[f"ci_results_{job_name}"]
            ):
                # Compute the difference between the previous and current (model failure) tables
                prev_model_failures = self.prev_ci_artifacts[f"ci_results_{job_name}"]["model_failures_report.txt"]
                entries_changed = self.compute_diff_for_failure_reports(model_failures_report, prev_model_failures)
                if len(entries_changed) > 0:
                    # Save the complete difference
                    diff_report = prepare_reports(
                        title="Changed model modules failures",
                        header=model_header,
                        reports=entries_changed,
                        to_truncate=False,
                    )
                    file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/changed_model_failures_report.txt")
                    with open(file_path, "w", encoding="UTF-8") as fp:
                        fp.write(diff_report)

                    # To be sent to Slack channels
                    diff_report = prepare_reports(
                        title="*Changed model modules failures*",
                        header=model_header,
                        reports=entries_changed,
                    )
                    model_failure_sections.append(
                        {"type": "section", "text": {"type": "mrkdwn", "text": diff_report}},
                    )

        return model_failure_sections

    @property
    def additional_failures(self) -> Dict:
        failures = {k: v["failed"] for k, v in self.additional_results.items()}
        errors = {k: v["error"] for k, v in self.additional_results.items()}

        individual_reports = []
        for key, value in failures.items():
            device_report = self.get_device_report(value)

            if sum(value.values()) or errors[key]:
                report = f"{key}"
                if errors[key]:
                    report = f"[Errored out] {report}"
                if device_report:
                    report = f"{device_report}{report}"

                individual_reports.append(report)

        header = "Single | Multi | Category\n"
        failures_report = prepare_reports(
            title="The following non-modeling tests had failures", header=header, reports=individual_reports
        )

        return {"type": "section", "text": {"type": "mrkdwn", "text": failures_report}}

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.ci_title:
            blocks.append(self.ci_title_section)

        if self.n_model_failures > 0 or self.n_additional_failures > 0:
            blocks.append(self.failures)

        if self.n_model_failures > 0:
            blocks.append(self.category_failures)

            for block in self.model_failures:
                if block["text"]["text"]:
                    blocks.append(block)

        if self.n_additional_failures > 0:
            blocks.append(self.additional_failures)

        if self.n_model_failures == 0 and self.n_additional_failures == 0:
            blocks.append(self.no_failures)

        if len(self.selected_warnings) > 0:
            blocks.append(self.warnings)

        new_failure_blocks = self.get_new_model_failure_blocks(with_header=False)
        if len(new_failure_blocks) > 0:
            blocks.extend(new_failure_blocks)

        # To save the list of new model failures
        extra_blocks = self.get_new_model_failure_blocks(to_truncate=False)
        if extra_blocks:
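            # The last block carries the full (untruncated) list of new failures; persist it
            # to a file so it can be uploaded as a CI artifact and to the Hub dataset below.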
            failure_text = extra_blocks[-1]["text"]["text"]
            file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/new_model_failures.txt")
            with open(file_path, "w", encoding="UTF-8") as fp:
                fp.write(failure_text)

            # upload results to Hub dataset
            file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/new_model_failures.txt")
            commit_info = api.upload_file(
                path_or_fileobj=file_path,
                path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_{job_name}/new_model_failures.txt",
                repo_id="hf-internal-testing/transformers_daily_ci",
                repo_type="dataset",
                token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
            )
            url = f"https://huggingface.co/datasets/hf-internal-testing/transformers_daily_ci/raw/{commit_info.oid}/{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_{job_name}/new_model_failures.txt"

            block = {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "New model failures were found in this run.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check New model failures"},
                    "url": url,
                },
            }
            blocks.append(block)

        return json.dumps(blocks)

    @staticmethod
    def error_out(title, ci_title="", runner_not_available=False, runner_failed=False, setup_failed=False):
        blocks = []
        title_block = {"type": "header", "text": {"type": "plain_text", "text": title}}
        blocks.append(title_block)

        if ci_title:
            ci_title_block = {"type": "section", "text": {"type": "mrkdwn", "text": ci_title}}
            blocks.append(ci_title_block)

        offline_runners = []
        if runner_not_available:
            text = "💔 CI runners are not available! Tests are not run. 😭"
            result = os.environ.get("OFFLINE_RUNNERS")
            if result is not None:
                offline_runners = json.loads(result)
        elif runner_failed:
            text = "💔 CI runners have problems! Tests are not run. 😭"
        elif setup_failed:
            text = "💔 Setup job failed. Tests are not run. 😭"
        else:
            text = "💔 There was an issue running the tests. 😭"

        error_block_1 = {
            "type": "header",
            "text": {
                "type": "plain_text",
                "text": text,
            },
        }

        text = ""
        if len(offline_runners) > 0:
            text = "\n • " + "\n • ".join(offline_runners)
            text = f"The following runners are offline:\n{text}\n\n"
        text += "🙏 Let's fix it ASAP! 🙏"

        error_block_2 = {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": text,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        blocks.extend([error_block_1, error_block_2])

        payload = json.dumps(blocks)

        print("Sending the following payload")
        print(json.dumps({"blocks": blocks}))

        client.chat_postMessage(
            channel=SLACK_REPORT_CHANNEL_ID,
            text=text,
            blocks=payload,
        )

    def post(self):
        payload = self.payload
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests." if self.n_failures else "All tests passed."
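        # Keep the timestamp of the main message so per-job details can be threaded under it (see `post_reply`).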
self.thread_ts = client.chat_postMessage( channel=SLACK_REPORT_CHANNEL_ID, blocks=payload, text=text, ) def get_reply_blocks(self, job_name, job_result, failures, device, text): """ failures: A list with elements of the form {"line": full test name, "trace": error trace} """ # `text` must be less than 3001 characters in Slack SDK # keep some room for adding "[Truncated]" when necessary MAX_ERROR_TEXT = 3000 - len("[Truncated]") failure_text = "" for idx, error in enumerate(failures): new_text = failure_text + f'*{error["line"]}*\n_{error["trace"]}_\n\n' if len(new_text) > MAX_ERROR_TEXT: # `failure_text` here has length <= 3000 failure_text = failure_text + "[Truncated]" break # `failure_text` here has length <= MAX_ERROR_TEXT failure_text = new_text title = job_name if device is not None: title += f" ({device}-gpu)" content = {"type": "section", "text": {"type": "mrkdwn", "text": text}} # TODO: Make sure we always have a valid job link (or at least a way not to break the report sending) # Currently we get the device from a job's artifact name. # If a device is found, the job name should contain the device type, for example, `XXX (single-gpu)`. # This could be done by adding `machine_type` in a job's `strategy`. # (If `job_result["job_link"][device]` is `None`, we get an error: `... [ERROR] must provide a string ...`) if job_result["job_link"] is not None and job_result["job_link"][device] is not None: content["accessory"] = { "type": "button", "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True}, "url": job_result["job_link"][device], } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failure_text}}, ] def get_new_model_failure_blocks(self, with_header=True, to_truncate=True): if self.prev_ci_artifacts is None: return [] sorted_dict = sorted(self.model_results.items(), key=lambda t: t[0]) prev_model_results = {} if ( f"ci_results_{job_name}" in self.prev_ci_artifacts and "model_results.json" in self.prev_ci_artifacts[f"ci_results_{job_name}"] ): prev_model_results = json.loads(self.prev_ci_artifacts[f"ci_results_{job_name}"]["model_results.json"]) all_failure_lines = {} for job, job_result in sorted_dict: if len(job_result["failures"]): devices = sorted(job_result["failures"].keys(), reverse=True) for device in devices: failures = job_result["failures"][device] prev_error_lines = {} if job in prev_model_results and device in prev_model_results[job]["failures"]: prev_error_lines = {error["line"] for error in prev_model_results[job]["failures"][device]} url = None if job_result["job_link"] is not None and job_result["job_link"][device] is not None: url = job_result["job_link"][device] for idx, error in enumerate(failures): if error["line"] in prev_error_lines: continue new_text = f'{error["line"]}\n\n' if new_text not in all_failure_lines: all_failure_lines[new_text] = [] all_failure_lines[new_text].append(f"<{url}|{device}>" if url is not None else device) MAX_ERROR_TEXT = 3000 - len("[Truncated]") - len("```New model failures```\n\n") if not to_truncate: MAX_ERROR_TEXT = float("inf") failure_text = "" for line, devices in all_failure_lines.items(): new_text = failure_text + f"{'|'.join(devices)} gpu\n{line}" if len(new_text) > MAX_ERROR_TEXT: # `failure_text` here has length <= 3000 failure_text = failure_text + "[Truncated]" break # `failure_text` here has length <= MAX_ERROR_TEXT failure_text = new_text blocks = [] if failure_text: if with_header: 
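                # Standalone message (e.g. in a threaded reply): lead with a proper header block.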
blocks.append( {"type": "header", "text": {"type": "plain_text", "text": "New model failures", "emoji": True}} ) else: failure_text = f"*New model failures*\n\n{failure_text}" blocks.append({"type": "section", "text": {"type": "mrkdwn", "text": failure_text}}) return blocks def post_reply(self): if self.thread_ts is None: raise ValueError("Can only post reply if a post has been made.") sorted_dict = sorted(self.model_results.items(), key=lambda t: t[0]) for job, job_result in sorted_dict: if len(job_result["failures"]): for device, failures in job_result["failures"].items(): text = "\n".join( sorted([f"*{k}*: {v[device]}" for k, v in job_result["failed"].items() if v[device]]) ) blocks = self.get_reply_blocks(job, job_result, failures, device, text=text) print("Sending the following reply") print(json.dumps({"blocks": blocks})) client.chat_postMessage( channel=SLACK_REPORT_CHANNEL_ID, text=f"Results for {job}", blocks=blocks, thread_ts=self.thread_ts["ts"], ) time.sleep(1) for job, job_result in self.additional_results.items(): if len(job_result["failures"]): for device, failures in job_result["failures"].items(): blocks = self.get_reply_blocks( job, job_result, failures, device, text=f'Number of failures: {job_result["failed"][device]}', ) print("Sending the following reply") print(json.dumps({"blocks": blocks})) client.chat_postMessage( channel=SLACK_REPORT_CHANNEL_ID, text=f"Results for {job}", blocks=blocks, thread_ts=self.thread_ts["ts"], ) time.sleep(1) blocks = self.get_new_model_failure_blocks() if blocks: print("Sending the following reply") print(json.dumps({"blocks": blocks})) client.chat_postMessage( channel=SLACK_REPORT_CHANNEL_ID, text="Results for new failures", blocks=blocks, thread_ts=self.thread_ts["ts"], ) time.sleep(1) def retrieve_artifact(artifact_path: str, gpu: Optional[str]): if gpu not in [None, "single", "multi"]: raise ValueError(f"Invalid GPU for artifact. 
Passed GPU: `{gpu}`.")

    _artifact = {}

    if os.path.exists(artifact_path):
        files = os.listdir(artifact_path)
        for file in files:
            try:
                with open(os.path.join(artifact_path, file)) as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(artifact_path, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str, single_gpu: bool = False, multi_gpu: bool = False):
            self.name = name
            self.single_gpu = single_gpu
            self.multi_gpu = multi_gpu
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str, gpu: Optional[str] = None):
            self.paths.append({"name": self.name, "path": path, "gpu": gpu})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory

        name_parts = artifact_name.split("_postfix_")
        if len(name_parts) > 1:
            artifact_name = name_parts[0]

        if artifact_name.startswith("single-gpu"):
            artifact_name = artifact_name[len("single-gpu") + 1 :]

            if artifact_name in _available_artifacts:
                _available_artifacts[artifact_name].single_gpu = True
            else:
                _available_artifacts[artifact_name] = Artifact(artifact_name, single_gpu=True)

            _available_artifacts[artifact_name].add_path(directory, gpu="single")

        elif artifact_name.startswith("multi-gpu"):
            artifact_name = artifact_name[len("multi-gpu") + 1 :]

            if artifact_name in _available_artifacts:
                _available_artifacts[artifact_name].multi_gpu = True
            else:
                _available_artifacts[artifact_name] = Artifact(artifact_name, multi_gpu=True)

            _available_artifacts[artifact_name].add_path(directory, gpu="multi")
        else:
            if artifact_name not in _available_artifacts:
                _available_artifacts[artifact_name] = Artifact(artifact_name)

            _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts


def prepare_reports(title, header, reports, to_truncate=True):
    report = ""

    MAX_ERROR_TEXT = 3000 - len("[Truncated]")
    if not to_truncate:
        MAX_ERROR_TEXT = float("inf")

    if len(reports) > 0:
        # `text` must be less than 3001 characters in Slack SDK
        # keep some room for adding "[Truncated]" when necessary
        for idx in range(len(reports)):
            _report = header + "\n".join(reports[: idx + 1])
            new_report = f"{title}:\n```\n{_report}\n```\n"
            if len(new_report) > MAX_ERROR_TEXT:
                # `report` here has length <= 3000
                report = report + "[Truncated]"
                break

            report = new_report

    return report


if __name__ == "__main__":
    SLACK_REPORT_CHANNEL_ID = os.environ["SLACK_REPORT_CHANNEL"]

    # runner_status = os.environ.get("RUNNER_STATUS")
    # runner_env_status = os.environ.get("RUNNER_ENV_STATUS")
    setup_status = os.environ.get("SETUP_STATUS")

    # runner_not_available = True if runner_status is not None and runner_status != "success" else False
    # runner_failed = True if runner_env_status is not None and runner_env_status != "success" else False
    # Let's keep the lines regarding runners' status (we might be able to use them again in the future)
    runner_not_available = False
    runner_failed = False
    # Some jobs don't depend (`needs`) on the job `setup`: in this case, the status of the job `setup` is `skipped`.
    setup_failed = False if setup_status in ["skipped", "success"] else True

    org = "huggingface"
    repo = "transformers"
    repository_full_name = f"{org}/{repo}"

    # This env. variable is set in workflow file (under the job `send_results`).
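    # Typical values include "push", "Nightly CI", "Scheduled CI (AMD)" or "Past CI ..." (see the handling below).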
    ci_event = os.environ["CI_EVENT"]

    # To find the PR number in a commit title, for example, `Add AwesomeFormer model (#99999)`
    pr_number_re = re.compile(r"\(#(\d+)\)$")

    title = f"🤗 Results of {ci_event} - {os.getenv('CI_TEST_JOB')}."
    # Add Commit/PR title with a link for push CI
    # (check the title in 2 env. variables - depending on whether the CI is triggered via `push` or `workflow_run` event)
    ci_title_push = os.environ.get("CI_TITLE_PUSH")
    ci_title_workflow_run = os.environ.get("CI_TITLE_WORKFLOW_RUN")
    ci_title = ci_title_push if ci_title_push else ci_title_workflow_run

    ci_sha = os.environ.get("CI_SHA")

    ci_url = None
    if ci_sha:
        ci_url = f"https://github.com/{repository_full_name}/commit/{ci_sha}"

    if ci_title is not None:
        if ci_url is None:
            raise ValueError(
                "When a title is found (`ci_title`), it means a `push` event or a `workflow_run` event (triggered by "
                "another `push` event), and the commit SHA has to be provided in order to create the URL to the "
                "commit page."
            )
        ci_title = ci_title.strip().split("\n")[0].strip()

        # Retrieve the PR title and author login to complete the report
        commit_number = ci_url.split("/")[-1]
        ci_detail_url = f"https://api.github.com/repos/{repository_full_name}/commits/{commit_number}"
        ci_details = requests.get(ci_detail_url).json()
        ci_author = ci_details["author"]["login"]

        merged_by = None
        # Find the PR number (if any) and change the url to the actual PR page.
        numbers = pr_number_re.findall(ci_title)
        if len(numbers) > 0:
            pr_number = numbers[0]
            ci_detail_url = f"https://api.github.com/repos/{repository_full_name}/pulls/{pr_number}"
            ci_details = requests.get(ci_detail_url).json()

            ci_author = ci_details["user"]["login"]
            ci_url = f"https://github.com/{repository_full_name}/pull/{pr_number}"

            merged_by = ci_details["merged_by"]["login"]

        if merged_by is None:
            ci_title = f"<{ci_url}|{ci_title}>\nAuthor: {ci_author}"
        else:
            ci_title = f"<{ci_url}|{ci_title}>\nAuthor: {ci_author} | Merged by: {merged_by}"

    elif ci_sha:
        ci_title = f"<{ci_url}|commit: {ci_sha}>"

    else:
        ci_title = ""

    if runner_not_available or runner_failed or setup_failed:
        Message.error_out(title, ci_title, runner_not_available, runner_failed, setup_failed)
        exit(0)

    # sys.argv[0] is always `utils/notification_service.py`.
    arguments = sys.argv[1:]
    # In our usage in `.github/workflows/slack-report.yml`, we always pass an argument when calling this script.
    # The argument could be an empty string `""` if a job doesn't depend on the job `setup`.
    if arguments[0] == "":
        models = []
    else:
        model_list_as_str = arguments[0]
        try:
            folder_slices = ast.literal_eval(model_list_as_str)
            # Need to change from elements like `models/bert` to `models_bert` (the ones used as artifact names).
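            # e.g. [["models/bert", "models/gpt2"], ...] -> ["models_bert", "models_gpt2", ...]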
models = [x.replace("models/", "models_") for folders in folder_slices for x in folders] except Exception: Message.error_out(title, ci_title) raise ValueError("Errored out.") github_actions_jobs = get_jobs( workflow_run_id=os.environ["GITHUB_RUN_ID"], token=os.environ["ACCESS_REPO_INFO_TOKEN"] ) github_actions_job_links = {job["name"]: job["html_url"] for job in github_actions_jobs} artifact_name_to_job_map = {} for job in github_actions_jobs: for step in job["steps"]: if step["name"].startswith("Test suite reports artifacts: "): artifact_name = step["name"][len("Test suite reports artifacts: ") :] artifact_name_to_job_map[artifact_name] = job break available_artifacts = retrieve_available_artifacts() modeling_categories = [ "PyTorch", "TensorFlow", "Flax", "Tokenizers", "Pipelines", "Trainer", "ONNX", "Auto", "Unclassified", ] # This dict will contain all the information relative to each model: # - Failures: the total, as well as the number of failures per-category defined above # - Success: total # - Time spent: as a comma-separated list of elapsed time # - Failures: as a line-break separated list of errors model_results = { model: { "failed": {m: {"unclassified": 0, "single": 0, "multi": 0} for m in modeling_categories}, "success": 0, "time_spent": "", "failures": {}, "job_link": {}, } for model in models if f"run_models_gpu_{model}_test_reports" in available_artifacts } unclassified_model_failures = [] for model in model_results.keys(): for artifact_path in available_artifacts[f"run_models_gpu_{model}_test_reports"].paths: artifact = retrieve_artifact(artifact_path["path"], artifact_path["gpu"]) if "stats" in artifact: # Link to the GitHub Action job job = artifact_name_to_job_map[artifact_path["path"]] model_results[model]["job_link"][artifact_path["gpu"]] = job["html_url"] failed, success, time_spent = handle_test_results(artifact["stats"]) model_results[model]["success"] += success model_results[model]["time_spent"] += time_spent[1:-1] + ", " stacktraces = handle_stacktraces(artifact["failures_line"]) for line in artifact["summary_short"].split("\n"): if line.startswith("FAILED "): line = line[len("FAILED ") :] line = line.split()[0].replace("\n", "") if artifact_path["gpu"] not in model_results[model]["failures"]: model_results[model]["failures"][artifact_path["gpu"]] = [] model_results[model]["failures"][artifact_path["gpu"]].append( {"line": line, "trace": stacktraces.pop(0)} ) if re.search("test_modeling_tf_", line): model_results[model]["failed"]["TensorFlow"][artifact_path["gpu"]] += 1 elif re.search("test_modeling_flax_", line): model_results[model]["failed"]["Flax"][artifact_path["gpu"]] += 1 elif re.search("test_modeling", line): model_results[model]["failed"]["PyTorch"][artifact_path["gpu"]] += 1 elif re.search("test_tokenization", line): model_results[model]["failed"]["Tokenizers"][artifact_path["gpu"]] += 1 elif re.search("test_pipelines", line): model_results[model]["failed"]["Pipelines"][artifact_path["gpu"]] += 1 elif re.search("test_trainer", line): model_results[model]["failed"]["Trainer"][artifact_path["gpu"]] += 1 elif re.search("onnx", line): model_results[model]["failed"]["ONNX"][artifact_path["gpu"]] += 1 elif re.search("auto", line): model_results[model]["failed"]["Auto"][artifact_path["gpu"]] += 1 else: model_results[model]["failed"]["Unclassified"][artifact_path["gpu"]] += 1 unclassified_model_failures.append(line) # Additional runs additional_files = { "PyTorch pipelines": "run_pipelines_torch_gpu_test_reports", "TensorFlow pipelines": 
"run_pipelines_tf_gpu_test_reports", "Examples directory": "run_examples_gpu_test_reports", "Torch CUDA extension tests": "run_torch_cuda_extensions_gpu_test_reports", } if ci_event in ["push", "Nightly CI"] or ci_event.startswith("Past CI"): del additional_files["Examples directory"] del additional_files["PyTorch pipelines"] del additional_files["TensorFlow pipelines"] elif ci_event.startswith("Scheduled CI (AMD)"): del additional_files["TensorFlow pipelines"] del additional_files["Torch CUDA extension tests"] elif ci_event.startswith("Push CI (AMD)"): additional_files = {} # A map associating the job names (specified by `inputs.job` in a workflow file) with the keys of # `additional_files`. This is used to remove some entries in `additional_files` that are not concerned by a # specific job. See below. job_to_test_map = { "run_pipelines_torch_gpu": "PyTorch pipelines", "run_pipelines_tf_gpu": "TensorFlow pipelines", "run_examples_gpu": "Examples directory", "run_torch_cuda_extensions_gpu": "Torch CUDA extension tests", } # Remove some entries in `additional_files` if they are not concerned. test_name = None job_name = os.getenv("CI_TEST_JOB") if job_name in job_to_test_map: test_name = job_to_test_map[job_name] additional_files = {k: v for k, v in additional_files.items() if k == test_name} additional_results = { key: { "failed": {"unclassified": 0, "single": 0, "multi": 0}, "success": 0, "time_spent": "", "error": False, "failures": {}, "job_link": {}, } for key in additional_files.keys() } for key in additional_results.keys(): # If a whole suite of test fails, the artifact isn't available. if additional_files[key] not in available_artifacts: additional_results[key]["error"] = True continue for artifact_path in available_artifacts[additional_files[key]].paths: # Link to the GitHub Action job job = artifact_name_to_job_map[artifact_path["path"]] additional_results[key]["job_link"][artifact_path["gpu"]] = job["html_url"] artifact = retrieve_artifact(artifact_path["path"], artifact_path["gpu"]) stacktraces = handle_stacktraces(artifact["failures_line"]) failed, success, time_spent = handle_test_results(artifact["stats"]) additional_results[key]["failed"][artifact_path["gpu"] or "unclassified"] += failed additional_results[key]["success"] += success additional_results[key]["time_spent"] += time_spent[1:-1] + ", " if len(artifact["errors"]): additional_results[key]["error"] = True if failed: for line in artifact["summary_short"].split("\n"): if line.startswith("FAILED "): line = line[len("FAILED ") :] line = line.split()[0].replace("\n", "") if artifact_path["gpu"] not in additional_results[key]["failures"]: additional_results[key]["failures"][artifact_path["gpu"]] = [] additional_results[key]["failures"][artifact_path["gpu"]].append( {"line": line, "trace": stacktraces.pop(0)} ) # Let's only check the warning for the model testing job. Currently, the job `run_extract_warnings` is only run # when `inputs.job` (in the workflow file) is `run_models_gpu`. The reason is: otherwise we need to save several # artifacts with different names which complicates the logic for an insignificant part of the CI workflow reporting. 
    selected_warnings = []
    if job_name == "run_models_gpu":
        if "warnings_in_ci" in available_artifacts:
            directory = available_artifacts["warnings_in_ci"].paths[0]["path"]
            with open(os.path.join(directory, "selected_warnings.json")) as fp:
                selected_warnings = json.load(fp)

    if not os.path.isdir(os.path.join(os.getcwd(), f"ci_results_{job_name}")):
        os.makedirs(os.path.join(os.getcwd(), f"ci_results_{job_name}"))

    target_workflow = "huggingface/transformers/.github/workflows/self-scheduled-caller.yml@refs/heads/main"
    is_scheduled_ci_run = os.environ.get("CI_WORKFLOW_REF") == target_workflow

    # Only the model testing job is concerned: this condition prevents other jobs from uploading an empty list as
    # results.
    if job_name == "run_models_gpu":
        with open(f"ci_results_{job_name}/model_results.json", "w", encoding="UTF-8") as fp:
            json.dump(model_results, fp, indent=4, ensure_ascii=False)

        # upload results to Hub dataset (only for the scheduled daily CI run on `main`)
        if is_scheduled_ci_run:
            api.upload_file(
                path_or_fileobj=f"ci_results_{job_name}/model_results.json",
                path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_{job_name}/model_results.json",
                repo_id="hf-internal-testing/transformers_daily_ci",
                repo_type="dataset",
                token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
            )

    # Must have the same keys as in `additional_results`.
    # The values are used as the file names where to save the corresponding CI job results.
    test_to_result_name = {
        "PyTorch pipelines": "torch_pipeline",
        "TensorFlow pipelines": "tf_pipeline",
        "Examples directory": "example",
        "Torch CUDA extension tests": "deepspeed",
    }
    for job, job_result in additional_results.items():
        with open(f"ci_results_{job_name}/{test_to_result_name[job]}_results.json", "w", encoding="UTF-8") as fp:
            json.dump(job_result, fp, indent=4, ensure_ascii=False)

        # upload results to Hub dataset (only for the scheduled daily CI run on `main`)
        if is_scheduled_ci_run:
            api.upload_file(
                path_or_fileobj=f"ci_results_{job_name}/{test_to_result_name[job]}_results.json",
                path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_{job_name}/{test_to_result_name[job]}_results.json",
                repo_id="hf-internal-testing/transformers_daily_ci",
                repo_type="dataset",
                token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
            )

    prev_ci_artifacts = None
    if is_scheduled_ci_run:
        if job_name == "run_models_gpu":
            # Get the last previously completed CI's failure tables
            artifact_names = [f"ci_results_{job_name}"]
            output_dir = os.path.join(os.getcwd(), "previous_reports")
            os.makedirs(output_dir, exist_ok=True)
            prev_ci_artifacts = get_last_daily_ci_reports(
                artifact_names=artifact_names, output_dir=output_dir, token=os.environ["ACCESS_REPO_INFO_TOKEN"]
            )

    message = Message(
        title,
        ci_title,
        model_results,
        additional_results,
        selected_warnings=selected_warnings,
        prev_ci_artifacts=prev_ci_artifacts,
    )

    # send the report only if there is any failure (for push CI)
    if message.n_failures or (ci_event != "push" and not ci_event.startswith("Push CI (AMD)")):
        message.post()
        message.post_reply()
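# ---------------------------------------------------------------------------
# Reference sketch (not part of the original file): the `dicts_to_sum` helper used
# throughout the `Message` class is defined earlier in `notification_service.py`.
# A minimal implementation consistent with how it is called above would be:
#
#     import collections
#     import functools
#     import operator
#
#     def dicts_to_sum(objects):
#         # Accept either a dict of dicts (use its values) or a list of dicts.
#         dicts = objects.values() if isinstance(objects, dict) else objects
#         # Counters support `+`, which sums matching keys across dictionaries.
#         return functools.reduce(operator.add, map(collections.Counter, dicts))
# ---------------------------------------------------------------------------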
transformers/utils/notification_service.py/0
{ "file_path": "transformers/utils/notification_service.py", "repo_id": "transformers", "token_count": 24162 }
446
from transformers import CLIPImageProcessor class CustomImageProcessor(CLIPImageProcessor): pass
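# Note (assumption): this stub lives under `utils/test_module` and is exercised by the
# dynamic-module tests, which load user-defined processors via `trust_remote_code=True`.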
transformers/utils/test_module/custom_image_processing.py/0
{ "file_path": "transformers/utils/test_module/custom_image_processing.py", "repo_id": "transformers", "token_count": 29 }
447
# pip install openrlbenchmark==0.2.1a5 # see https://github.com/openrlbenchmark/openrlbenchmark#get-started for documentation echo "we deal with $TAGS_STRING" python -m openrlbenchmark.rlops_multi_metrics \ --filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=trl_ppo_trainer_config.value.reward_model&cen=trl_ppo_trainer_config.value.exp_name&metrics=env/reward_mean&metrics=objective/kl' \ "ppo$TAGS_STRING" \ "ppo_gpt2xl_grad_accu$TAGS_STRING" \ --env-ids sentiment-analysis:lvwerra/distilbert-imdb \ --no-check-empty-runs \ --pc.ncols 2 \ --pc.ncols-legend 1 \ --output-filename benchmark/trl/$FOLDER_STRING/different_models \ --scan-history python -m openrlbenchmark.rlops_multi_metrics \ --filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=trl_ppo_trainer_config.value.reward_model&cen=trl_ppo_trainer_config.value.exp_name&metrics=env/reward_mean&metrics=objective/kl' \ "ppo_Cerebras-GPT-6.7B_grad_accu_deepspeed_stage2$TAGS_STRING" \ --env-ids sentiment-analysis:cerebras/Cerebras-GPT-6.7B \ --no-check-empty-runs \ --pc.ncols 2 \ --pc.ncols-legend 1 \ --output-filename benchmark/trl/$FOLDER_STRING/deepspeed \ --scan-history python benchmark/upload_benchmark.py \ --folder_path="benchmark/trl/$FOLDER_STRING" \ --path_in_repo="images/benchmark/$FOLDER_STRING" \ --repo_id="trl-internal-testing/example-images" \ --repo_type="dataset"
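# Usage sketch (not part of the original script): TAGS_STRING and FOLDER_STRING are
# expected to be exported by the calling benchmark workflow before this script runs,
# e.g. (hypothetical values):
#   export TAGS_STRING='?tag=v0.4.7'
#   export FOLDER_STRING='v0.4.7'
#   bash benchmark/benchmark_level2_plot.sh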
trl/benchmark/benchmark_level2_plot.sh/0
{ "file_path": "trl/benchmark/benchmark_level2_plot.sh", "repo_id": "trl", "token_count": 632 }
448
# Callbacks ## SyncRefModelCallback [[autodoc]] SyncRefModelCallback ## RichProgressCallback [[autodoc]] RichProgressCallback ## WinRateCallback [[autodoc]] WinRateCallback
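Below is a minimal usage sketch (not part of the original page; the top-level import path is an assumption based on the autodoc entries above, and `trainer` stands for any `transformers`-style trainer such as `SFTTrainer`):

```python
from trl import RichProgressCallback

trainer.add_callback(RichProgressCallback())
```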
trl/docs/source/callbacks.mdx/0
{ "file_path": "trl/docs/source/callbacks.mdx", "repo_id": "trl", "token_count": 54 }
449
# Examples of using peft with trl to fine-tune 8-bit models with Low Rank Adaptation (LoRA)

The notebooks and scripts in these examples show how to use Low Rank Adaptation (LoRA) to fine-tune models in a memory-efficient manner. Most of the PEFT methods supported in the peft library work here, but note that some of them, such as prompt tuning, are not supported.
For more information on LoRA, see the [original paper](https://huggingface.co/papers/2106.09685).

Here's an overview of the `peft`-enabled notebooks and scripts in the [trl repository](https://github.com/huggingface/trl/tree/main/examples):

| File | Task | Description | Colab link |
|---|---|---|---|
| [`stack_llama/rl_training.py`](https://github.com/huggingface/trl/blob/main/examples/research_projects/stack_llama/scripts/rl_training.py) | RLHF | Distributed fine-tuning of the 7b parameter LLaMA models with a learned reward model and `peft`. | |
| [`stack_llama/reward_modeling.py`](https://github.com/huggingface/trl/blob/main/examples/research_projects/stack_llama/scripts/reward_modeling.py) | Reward Modeling | Distributed training of the 7b parameter LLaMA reward model with `peft`. | |
| [`stack_llama/supervised_finetuning.py`](https://github.com/huggingface/trl/blob/main/examples/research_projects/stack_llama/scripts/supervised_finetuning.py) | SFT | Distributed instruction/supervised fine-tuning of the 7b parameter LLaMA model with `peft`. | |

## Installation
Note: peft is in active development, so we install directly from its GitHub repository. Peft also relies on the latest version of transformers.

```bash
pip install trl[peft]
pip install bitsandbytes loralib
pip install git+https://github.com/huggingface/transformers.git@main
#optional: wandb
pip install wandb
```

Note: if you don't want to log with `wandb`, remove `log_with="wandb"` in the scripts/notebooks. You can also replace it with your favourite experiment tracker that's [supported by `accelerate`](https://huggingface.co/docs/accelerate/usage_guides/tracking).

## How to use it?

Simply declare a `PeftConfig` object in your script and pass it through `.from_pretrained` to load the TRL+PEFT model.

```python
from peft import LoraConfig
from trl import AutoModelForCausalLMWithValueHead

model_id = "edbeeching/gpt-neo-125M-imdb"
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)

model = AutoModelForCausalLMWithValueHead.from_pretrained(
    model_id,
    peft_config=lora_config,
)
```

And if you want to load your model in 8bit precision:

```python
pretrained_model = AutoModelForCausalLMWithValueHead.from_pretrained(
    config.model_name,
    load_in_8bit=True,
    peft_config=lora_config,
)
```

... or in 4bit precision:

```python
pretrained_model = AutoModelForCausalLMWithValueHead.from_pretrained(
    config.model_name,
    peft_config=lora_config,
    load_in_4bit=True,
)
```

## Launch scripts

The `trl` library is powered by `accelerate`. As such it is best to configure and launch trainings with the following commands:

```bash
accelerate config # will prompt you to define the training configuration
accelerate launch examples/scripts/ppo.py --use_peft # launches training
```

## Using `trl` + `peft` and Data Parallelism

You can scale up to as many GPUs as you want, as long as you are able to fit the training process on a single device. The only tweak you need to apply is to load the model as follows:

```python
from peft import LoraConfig

...
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)

pretrained_model = AutoModelForCausalLMWithValueHead.from_pretrained(
    config.model_name,
    peft_config=lora_config,
)
```

And if you want to load your model in 8bit precision:

```python
pretrained_model = AutoModelForCausalLMWithValueHead.from_pretrained(
    config.model_name,
    peft_config=lora_config,
    load_in_8bit=True,
)
```

... or in 4bit precision:

```python
pretrained_model = AutoModelForCausalLMWithValueHead.from_pretrained(
    config.model_name,
    peft_config=lora_config,
    load_in_4bit=True,
)
```

Finally, make sure that the rewards are computed on the correct device as well; for that you can use `ppo_trainer.model.current_device`.

## Naive pipeline parallelism (NPP) for large models (>60B models)

The `trl` library also supports naive pipeline parallelism (NPP) for large models (>60B parameters). In this paradigm, the model and the adapters are loaded across multiple GPUs, and the activations and gradients are naively communicated across the GPUs. This supports `int8` models as well as other `dtype` models.

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-npp.png">
</div>

### How to use NPP?

Simply load your model with a custom `device_map` argument on the `from_pretrained` to split your model across multiple devices. Check out this [nice tutorial](https://github.com/huggingface/blog/blob/main/accelerate-large-models.md) on how to properly create a `device_map` for your model.

Also make sure the `lm_head` module is on the first GPU device, as an error may be thrown if it is not. At the time of writing, you need to install the `main` branch of `accelerate`: `pip install git+https://github.com/huggingface/accelerate.git@main` and `peft`: `pip install git+https://github.com/huggingface/peft.git@main`.

### Launch scripts

Although the `trl` library is powered by `accelerate`, you should run your NPP training script as a single process. Note that we do not support Data Parallelism together with NPP yet.

```bash
python PATH_TO_SCRIPT
```

## Fine-tuning Llama-2 model

You can easily fine-tune the Llama2 model using `SFTTrainer` and the official script! For example, to fine-tune llama2-7b on the Guanaco dataset, run (tested on a single NVIDIA T4-16GB):

```bash
python examples/scripts/sft.py --output_dir sft_openassistant-guanaco --model_name meta-llama/Llama-2-7b-hf --dataset_name timdettmers/openassistant-guanaco --load_in_4bit --use_peft --per_device_train_batch_size 4 --gradient_accumulation_steps 2
```
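Returning to the NPP loading pattern described earlier, here is a concrete sketch (an illustration, not part of the official scripts; `device_map="auto"` is the simplest starting point, while a hand-written `device_map` following the tutorial linked above gives finer control over which layers, e.g. `lm_head`, land on the first GPU):

```python
from peft import LoraConfig
from trl import AutoModelForCausalLMWithValueHead

lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)

pretrained_model = AutoModelForCausalLMWithValueHead.from_pretrained(
    "huggyllama/llama-7b",  # assumption: any large causal LM checkpoint
    peft_config=lora_config,
    load_in_8bit=True,
    device_map="auto",  # let accelerate split the layers across the visible GPUs
)
```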
trl/docs/source/lora_tuning_peft.mdx/0
{ "file_path": "trl/docs/source/lora_tuning_peft.mdx", "repo_id": "trl", "token_count": 2081 }
450
# Examples Please check out https://huggingface.co/docs/trl/example_overview for documentation on our examples.
trl/examples/README.md/0
{ "file_path": "trl/examples/README.md", "repo_id": "trl", "token_count": 30 }
451
<jupyter_start><jupyter_text>Tune GPT2 to generate positive reviews> Optimise GPT2 to produce positive IMDB movie reviews using a BERT sentiment classifier as a reward function. Figure: Experiment setup to tune GPT2. The yellow arrows are outside the scope of this notebook, but the trained models are available through Hugging Face. In this notebook we fine-tune GPT2 (small) to generate positive movie reviews based on the IMDB dataset. The model gets the start of a real review and is tasked to produce positive continuations. To reward positive continuations we use a BERT classifier to analyse the sentiment of the produced sentences and use the classifier's outputs as reward signals for PPO training. Setup experiment Import dependencies<jupyter_code>%load_ext autoreload
%autoreload 2

%pip install transformers trl wandb

import torch
from tqdm import tqdm
import pandas as pd

tqdm.pandas()

from transformers import pipeline, AutoTokenizer
from datasets import load_dataset

from trl import PPOTrainer, PPOConfig, AutoModelForCausalLMWithValueHead
from trl.core import LengthSampler<jupyter_output><empty_output><jupyter_text>Configuration<jupyter_code>config = PPOConfig(
    model_name="lvwerra/gpt2-imdb",
    learning_rate=1.41e-5,
    log_with="wandb",
)

sent_kwargs = {"top_k": None, "function_to_apply": "none", "batch_size": 16}

import wandb

wandb.init()<jupyter_output><empty_output><jupyter_text>You can see that we load a GPT2 model called `gpt2_imdb`. This model was additionally fine-tuned on the IMDB dataset for 1 epoch with the huggingface [script](https://github.com/huggingface/transformers/blob/main/examples/legacy/run_language_modeling.py) (no special settings). The other parameters are mostly taken from the original paper ["Fine-Tuning Language Models from Human Preferences"](https://huggingface.co/papers/1909.08593). This model as well as the BERT model is available in the Huggingface model zoo [here](https://huggingface.co/models). The following code should automatically download the models. Load data and models Load IMDB datasetThe IMDB dataset contains 50k movie reviews annotated with "positive"/"negative" feedback indicating the sentiment. We load the IMDB dataset into a `datasets.Dataset` and filter for comments that are at least 200 characters long. Then we tokenize each text and cut it to a random size with the `LengthSampler`.<jupyter_code>def build_dataset(config, dataset_name="imdb", input_min_text_length=2, input_max_text_length=8):
    """
    Build dataset for training. This builds the dataset from `load_dataset`, one should
    customize this function to train the model on its own dataset.

    Args:
        dataset_name (`str`):
            The name of the dataset to be loaded.

    Returns:
        dataloader (`torch.utils.data.DataLoader`):
            The dataloader for the dataset.
""" tokenizer = AutoTokenizer.from_pretrained(config.model_name) tokenizer.pad_token = tokenizer.eos_token # load imdb with datasets ds = load_dataset(dataset_name, split="train") ds = ds.rename_columns({"text": "review"}) ds = ds.filter(lambda x: len(x["review"]) > 200, batched=False) input_size = LengthSampler(input_min_text_length, input_max_text_length) def tokenize(sample): sample["input_ids"] = tokenizer.encode(sample["review"])[: input_size()] sample["query"] = tokenizer.decode(sample["input_ids"]) return sample ds = ds.map(tokenize, batched=False) ds.set_format(type="torch") return ds dataset = build_dataset(config) def collator(data): return dict((key, [d[key] for d in data]) for key in data[0])<jupyter_output><empty_output><jupyter_text>Load pre-trained GPT2 language models We load the GPT2 model with a value head and the tokenizer. We load the model twice; the first model is optimized while the second model serves as a reference to calculate the KL-divergence from the starting point. This serves as an additional reward signal in the PPO training to make sure the optimized model does not deviate too much from the original language model.<jupyter_code>model = AutoModelForCausalLMWithValueHead.from_pretrained(config.model_name) ref_model = AutoModelForCausalLMWithValueHead.from_pretrained(config.model_name) tokenizer = AutoTokenizer.from_pretrained(config.model_name) tokenizer.pad_token = tokenizer.eos_token<jupyter_output><empty_output><jupyter_text>Initialize PPOTrainerThe `PPOTrainer` takes care of device placement and optimization later on:<jupyter_code>ppo_trainer = PPOTrainer(config, model, ref_model, tokenizer, dataset=dataset, data_collator=collator)<jupyter_output><empty_output><jupyter_text>Load BERT classifierWe load a BERT classifier fine-tuned on the IMDB dataset.<jupyter_code>device = ppo_trainer.accelerator.device if ppo_trainer.accelerator.num_processes == 1: device = 0 if torch.cuda.is_available() else "cpu" # to avoid a `pipeline` bug sentiment_pipe = pipeline("sentiment-analysis", model="lvwerra/distilbert-imdb", device=device)<jupyter_output><empty_output><jupyter_text>The model outputs are the logits for the negative and positive class. We will use the logits for positive class as a reward signal for the language model.<jupyter_code>text = "this movie was really bad!!" sentiment_pipe(text, **sent_kwargs) text = "this movie was really good!!" sentiment_pipe(text, **sent_kwargs)<jupyter_output><empty_output><jupyter_text>Generation settingsFor the response generation we just use sampling and make sure top-k and nucleus sampling are turned off as well as a minimal length.<jupyter_code>gen_kwargs = {"min_length": -1, "top_k": 0.0, "top_p": 1.0, "do_sample": True, "pad_token_id": tokenizer.eos_token_id}<jupyter_output><empty_output><jupyter_text>Optimize model Training loop The training loop consists of the following main steps:1. Get the query responses from the policy network (GPT-2)2. Get sentiments for query/responses from BERT3. 
Optimize policy with PPO using the (query, response, reward) triplet**Training time**This step takes **~2h** on a V100 GPU with the above specified settings.<jupyter_code>output_min_length = 4 output_max_length = 16 output_length_sampler = LengthSampler(output_min_length, output_max_length) generation_kwargs = { "min_length": -1, "top_k": 0.0, "top_p": 1.0, "do_sample": True, "pad_token_id": tokenizer.eos_token_id, } for epoch, batch in enumerate(tqdm(ppo_trainer.dataloader)): query_tensors = batch["input_ids"] #### Get response from gpt2 response_tensors = [] for query in query_tensors: gen_len = output_length_sampler() generation_kwargs["max_new_tokens"] = gen_len query_response = ppo_trainer.generate(query, **generation_kwargs).squeeze() response_len = len(query_response) - len(query) response_tensors.append(query_response[-response_len:]) batch["response"] = [tokenizer.decode(r.squeeze()) for r in response_tensors] #### Compute sentiment score texts = [q + r for q, r in zip(batch["query"], batch["response"])] pipe_outputs = sentiment_pipe(texts, **sent_kwargs) positive_scores = [item["score"] for output in pipe_outputs for item in output if item["label"] == "POSITIVE"] rewards = [torch.tensor(score) for score in positive_scores] #### Run PPO step stats = ppo_trainer.step(query_tensors, response_tensors, rewards) ppo_trainer.log_stats(stats, batch, rewards)<jupyter_output><empty_output><jupyter_text>Training progressIf you are tracking the training progress with Weights&Biases you should see a plot similar to the one below. Check out the interactive sample report on wandb.ai: [link](https://wandb.ai/huggingface/trl/runs/w9l3110g). Figure: Reward mean and distribution evolution during training. One can observe how the model starts to generate more positive outputs after a few optimisation steps.> Note: Investigating the KL-divergence will probably show that at this point the model has not converged to the target KL-divergence, yet. To get there would require longer training or starting with a higher initial coefficient. Model inspectionLet's inspect some examples from the IMDB dataset. 
We can use `ref_model` to compare the tuned model `model` against the model before optimisation.<jupyter_code>#### get a batch from the dataset bs = 16 game_data = dict() dataset.set_format("pandas") df_batch = dataset[:].sample(bs) game_data["query"] = df_batch["query"].tolist() query_tensors = df_batch["input_ids"].tolist() response_tensors_ref, response_tensors = [], [] #### get response from gpt2 and gpt2_ref for i in range(bs): query = torch.tensor(query_tensors[i]).to(device) gen_len = output_length_sampler() query_response = ref_model.generate(query.unsqueeze(0), max_new_tokens=gen_len, **gen_kwargs).squeeze() response_len = len(query_response) - len(query) response_tensors_ref.append(query_response[-response_len:]) query_response = model.generate(query.unsqueeze(0), max_new_tokens=gen_len, **gen_kwargs).squeeze() response_len = len(query_response) - len(query) response_tensors.append(query_response[-response_len:]) #### decode responses game_data["response (before)"] = [tokenizer.decode(response_tensors_ref[i]) for i in range(bs)] game_data["response (after)"] = [tokenizer.decode(response_tensors[i]) for i in range(bs)] #### sentiment analysis of query/response pairs before/after texts = [q + r for q, r in zip(game_data["query"], game_data["response (before)"])] pipe_outputs = sentiment_pipe(texts, **sent_kwargs) positive_scores = [item["score"] for output in pipe_outputs for item in output if item["label"] == "POSITIVE"] game_data["rewards (before)"] = positive_scores texts = [q + r for q, r in zip(game_data["query"], game_data["response (after)"])] pipe_outputs = sentiment_pipe(texts, **sent_kwargs) positive_scores = [item["score"] for output in pipe_outputs for item in output if item["label"] == "POSITIVE"] game_data["rewards (after)"] = positive_scores # store results in a dataframe df_results = pd.DataFrame(game_data) df_results<jupyter_output><empty_output><jupyter_text>Looking at the reward mean/median of the generated sequences we observe a significant difference.<jupyter_code>print("mean:") display(df_results[["rewards (before)", "rewards (after)"]].mean()) print() print("median:") display(df_results[["rewards (before)", "rewards (after)"]].median())<jupyter_output>mean:<jupyter_text>Save modelFinally, we save the model and push it to the Hugging Face for later usage.<jupyter_code>model.save_pretrained("gpt2-imdb-pos-v2", push_to_hub=True) tokenizer.save_pretrained("gpt2-imdb-pos-v2", push_to_hub=True)<jupyter_output><empty_output>
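<jupyter_text>As a final sanity check (a sketch, not part of the original notebook), the saved model can be reloaded with the standard `transformers` API from the local directory created by `save_pretrained` above:<jupyter_code>from transformers import AutoModelForCausalLM, AutoTokenizer

reloaded_model = AutoModelForCausalLM.from_pretrained("gpt2-imdb-pos-v2")
reloaded_tokenizer = AutoTokenizer.from_pretrained("gpt2-imdb-pos-v2")<jupyter_output><empty_output>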
trl/examples/notebooks/gpt2-sentiment.ipynb/0
{ "file_path": "trl/examples/notebooks/gpt2-sentiment.ipynb", "repo_id": "trl", "token_count": 3529 }
452
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Optional

import torch
from datasets import load_dataset
from torch.optim import Adam
from tqdm import tqdm
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    HfArgumentParser,
    RobertaForSequenceClassification,
    RobertaTokenizer,
)

from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer, create_reference_model, set_seed
from trl.core import LengthSampler


tqdm.pandas()

########################################################################
# This is a fully working, simple example of using trl with accelerate.
#
# This example fine-tunes a GPT-J model to generate less toxic content
# by using the allenai/real-toxicity-prompts dataset. We use PPO
# (proximal policy optimization) to optimize the model
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - multi GPUS (using DeepSpeed ZeRO-Offload stages 1 & 2)
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, first initialize the accelerate
# configuration with `accelerate config`
#
########################################################################


# We first define the configuration of the experiment, defining the model, the dataset,
# the training parameters, and the PPO parameters.
# Check the default arguments in the `PPOConfig` class for more details.
# If you want to log with tensorboard, add the kwarg
# `project_kwargs={"logging_dir": PATH_TO_LOGS}` to the PPOConfig.
@dataclass
class ScriptArguments:
    """
    The name of the causal LM model we wish to fine-tune with PPO
    """

    # NOTE: gpt2 models use Conv1D instead of Linear layers which are not yet supported in 8 bit mode
    # models like gpt-neo* models are more suitable.
    model_name: Optional[str] = field(default="ybelkada/gpt-j-6b-sharded-bf16", metadata={"help": "the model name"})
    log_with: Optional[str] = field(default=None, metadata={"help": "use 'wandb' to log with wandb"})
    learning_rate: Optional[float] = field(default=(1.47e-5) * 2, metadata={"help": "the learning rate"})
    mini_batch_size: Optional[int] = field(default=4, metadata={"help": "the PPO minibatch size"})
    batch_size: Optional[int] = field(default=16, metadata={"help": "the batch size"})
    gradient_accumulation_steps: Optional[int] = field(
        default=1, metadata={"help": "the number of gradient accumulation steps"}
    )
    model_save_path: Optional[str] = field(
        default="./gpt-j-6B-detoxified-long-context-26-shl-1e4-final",
        metadata={"help": "the path to save the model"},
    )


parser = HfArgumentParser(ScriptArguments)
script_args = parser.parse_args_into_dataclasses()[0]

config = PPOConfig(
    model_name=script_args.model_name,
    learning_rate=script_args.learning_rate,
    log_with=script_args.log_with,
    ppo_epochs=100,
    mini_batch_size=script_args.mini_batch_size,
    batch_size=script_args.batch_size,
    gradient_accumulation_steps=script_args.gradient_accumulation_steps,
)


# Below is an example function to build the dataset. In our case, we use the
# allenai/real-toxicity-prompts dataset from the `datasets` library. One should
# customize this function to train the model on its own dataset.
def build_dataset(
    config, dataset_name="allenai/real-toxicity-prompts", input_min_text_length=5, input_max_text_length=10
):
    """
    Build dataset for training. This builds the dataset from `load_dataset`, one should
    customize this function to train the model on its own dataset.

    Args:
        dataset_name (`str`):
            The name of the dataset to be loaded.

    Returns:
        dataloader (`torch.utils.data.DataLoader`):
            The dataloader for the dataset.
    """
    tokenizer = AutoTokenizer.from_pretrained(config.model_name)
    tokenizer.pad_token = tokenizer.eos_token

    ds = load_dataset(dataset_name, split="train")

    def filter_fn(sample):
        toxicity = sample["prompt"]["toxicity"]
        return toxicity is not None and toxicity > 0.3

    ds = ds.filter(filter_fn, batched=False)

    input_size = LengthSampler(input_min_text_length, input_max_text_length)

    def tokenize(sample):
        prompt = sample["prompt"]["text"]
        continuation = sample["continuation"]["text"]

        sample["input_ids"] = tokenizer.encode(prompt + continuation)[: input_size()]
        sample["query"] = tokenizer.decode(sample["input_ids"])
        return sample

    ds = ds.map(tokenize, batched=False)
    ds.set_format(type="torch")

    ds = ds.train_test_split(test_size=0.2, shuffle=False)["train"]

    return ds


# We retrieve the dataloader by calling the `build_dataset` function.
min_input_length = 30
max_input_length = 40
dataset = build_dataset(config, input_min_text_length=min_input_length, input_max_text_length=max_input_length)


def collator(data):
    return {key: [d[key] for d in data] for key in data[0]}


# set seed before initializing value head for deterministic eval
set_seed(config.seed)

# Now let's build the model, the reference model, and the tokenizer. We first load the model
# in bfloat16 to save memory using `transformers`.
model = AutoModelForCausalLM.from_pretrained(config.model_name, torch_dtype=torch.bfloat16)
# And then we pass the loaded model to `AutoModelForCausalLMWithValueHead`.
model = AutoModelForCausalLMWithValueHead.from_pretrained(model)

# We create a reference model by sharing 20 layers
ref_model = create_reference_model(model, num_shared_layers=20)

# We make sure to use the `Adam` optimizer on the model parameters that require gradients.
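# (the `requires_grad` filter keeps any frozen parameters out of the optimizer state)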
optimizer = Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=config.learning_rate)

# The GPT-2 / GPT-J tokenizer does not define a pad token by default; we set it to the eos token
# (only needed for this model).
tokenizer = AutoTokenizer.from_pretrained(config.model_name)
tokenizer.pad_token = tokenizer.eos_token

# We then build the PPOTrainer, passing the model, the reference model, the tokenizer
ppo_trainer = PPOTrainer(
    config,
    model,
    ref_model=ref_model,
    tokenizer=tokenizer,
    dataset=dataset,
    data_collator=collator,
    optimizer=optimizer,
)

# We then build the reward pipeline; we will use the toxicity model to compute the reward.
# We first load the toxicity model and tokenizer.
toxicity_model_id = "facebook/roberta-hate-speech-dynabench-r4-target"
toxicity_tokenizer = RobertaTokenizer.from_pretrained(toxicity_model_id)
# We load the toxicity model in fp16 to save memory.
toxicity_model = RobertaForSequenceClassification.from_pretrained(toxicity_model_id, torch_dtype=torch.float16).to(
    ppo_trainer.accelerator.device
)


# We then define the arguments to pass to the `generate` function. These arguments
# are passed to the `generate` function of the PPOTrainer, which is a wrapper around
# the `generate` function of the trained model.
generation_kwargs = {
    "min_length": -1,
    "top_k": 0.0,
    "top_p": 1.0,
    "do_sample": True,
    "pad_token_id": tokenizer.eos_token_id,
}
output_min_length = 20
output_max_length = 30
output_length_sampler = LengthSampler(output_min_length, output_max_length)

model_save_path = script_args.model_save_path

for epoch, batch in tqdm(enumerate(ppo_trainer.dataloader)):
    query_tensors = batch["input_ids"]

    # Get response from the policy model
    response_tensors = []
    for query in query_tensors:
        gen_len = output_length_sampler()
        generation_kwargs["max_new_tokens"] = gen_len
        response = ppo_trainer.generate(query, **generation_kwargs)
        response_tensors.append(response.squeeze()[-gen_len:])
    batch["response"] = [tokenizer.decode(r.squeeze()) for r in response_tensors]

    # Compute the toxicity reward: the logit of the non-toxic class (higher = less toxic)
    texts = batch["response"]
    toxicity_inputs = toxicity_tokenizer(texts, padding=True, truncation=True, return_tensors="pt").to(
        ppo_trainer.accelerator.device
    )
    logits = toxicity_model(**toxicity_inputs).logits.float()
    toxicity_labels = (logits[:, 0]).tolist()

    rewards = [torch.tensor(output) for output in toxicity_labels]

    # Run PPO step
    stats = ppo_trainer.step(query_tensors, response_tensors, rewards)
    ppo_trainer.log_stats(stats, batch, rewards)

    # Save the model every 100 steps
    if epoch % 100 == 0:
        if ppo_trainer.accelerator.is_main_process:
            ppo_trainer.save_pretrained(model_save_path)
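# Usage sketch (not part of the original script): configure accelerate once, then launch, e.g.
#   accelerate config
#   accelerate launch gpt-j-6b-toxicity.py --log_with wandb --batch_size 16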
trl/examples/research_projects/toxicity/scripts/gpt-j-6b-toxicity.py/0
{ "file_path": "trl/examples/research_projects/toxicity/scripts/gpt-j-6b-toxicity.py", "repo_id": "trl", "token_count": 3133 }
453
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Optional

import torch
from accelerate import PartialState
from datasets import load_dataset
from peft import LoraConfig
from tqdm import tqdm
from transformers import AutoTokenizer, BitsAndBytesConfig, HfArgumentParser

from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer
from trl.core import LengthSampler
from trl.import_utils import is_npu_available, is_xpu_available


input_min_text_length = 6
input_max_text_length = 12


@dataclass
class ScriptArguments:
    """
    The name of the causal LM model we wish to fine-tune with PPO
    """

    model_name: Optional[str] = field(default="huggyllama/llama-7b", metadata={"help": "the model name"})
    dataset_name: Optional[str] = field(default="Anthropic/hh-rlhf", metadata={"help": "the dataset name"})
    rm_adapter: Optional[str] = field(
        default="trl-lib/llama-7b-hh-rm-adapter", metadata={"help": "the rm adapter name"}
    )
    log_with: Optional[str] = field(default=None, metadata={"help": "use 'wandb' to log with wandb"})
    use_safetensors: Optional[bool] = field(default=False, metadata={"help": "Use safetensors"})
    seed: Optional[int] = field(default=0, metadata={"help": "the random seed"})
    use_score_scaling: Optional[bool] = field(default=False, metadata={"help": "Use score scaling"})
    use_score_norm: Optional[bool] = field(
        default=False, metadata={"help": "Use score normalization. Only applicable if use_score_scaling is True"}
    )
    score_clip: Optional[float] = field(default=None, metadata={"help": "Score clipping"})
    dataset_num_proc: Optional[int] = field(
        default=None, metadata={"help": "The number of workers to use to tokenize the data"}
    )


parser = HfArgumentParser(ScriptArguments)
script_args = parser.parse_args_into_dataclasses()[0]


def create_and_prepare_dataset(tokenizer, num_proc):
    dataset = load_dataset(script_args.dataset_name, split="train[:1%]")

    input_size = LengthSampler(input_min_text_length, input_max_text_length)

    def tokenize(example):
        text_size = input_size()
        example["input_ids"] = tokenizer.encode(example["chosen"])[:text_size]
        example["query"] = tokenizer.decode(example["input_ids"])
        return example

    dataset = dataset.map(tokenize, batched=False, num_proc=num_proc)
    dataset.set_format("torch")
    return dataset


lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)
nf4_config = BitsAndBytesConfig(
    load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.bfloat16
)
model = AutoModelForCausalLMWithValueHead.from_pretrained(
    script_args.model_name,
    # `is_npu_available` must be called, not passed as a bare function reference;
    # otherwise it is always truthy and non-XPU machines would be mapped to "npu:0".
    device_map={"": "xpu:0"} if is_xpu_available() else {"": "npu:0"} if is_npu_available() else {"": 0},
    peft_config=lora_config,
    quantization_config=nf4_config,
    reward_adapter=script_args.rm_adapter,
    use_safetensors=script_args.use_safetensors,
)
tokenizer = AutoTokenizer.from_pretrained(script_args.model_name)
tokenizer.pad_token = tokenizer.eos_token

# Compute that only on the main process for faster data processing.
# see: https://github.com/huggingface/trl/pull/1255
with PartialState().local_main_process_first():
    dataset = create_and_prepare_dataset(tokenizer, script_args.dataset_num_proc)


def collator(data):
    return {key: [d[key] for d in data] for key in data[0]}


config = PPOConfig(
    model_name=script_args.model_name,
    log_with=script_args.log_with,
    learning_rate=1e-5,
    batch_size=8,
    mini_batch_size=2,
    gradient_accumulation_steps=2,
    optimize_cuda_cache=True,
    seed=script_args.seed,
    use_score_scaling=script_args.use_score_scaling,
    use_score_norm=script_args.use_score_norm,
    score_clip=script_args.score_clip,
)

ppo_trainer = PPOTrainer(
    config,
    model,
    ref_model=None,
    tokenizer=tokenizer,
    dataset=dataset,
    data_collator=collator,
)

generation_kwargs = {
    "top_k": 0.0,
    "top_p": 0.9,
    "do_sample": True,
    "pad_token_id": tokenizer.pad_token_id,
    "max_new_tokens": 32,
}

for _epoch, batch in tqdm(enumerate(ppo_trainer.dataloader)):
    question_tensors = batch["input_ids"]

    response_tensors = ppo_trainer.generate(
        question_tensors,
        return_prompt=False,
        **generation_kwargs,
    )
    batch["response"] = tokenizer.batch_decode(response_tensors, skip_special_tokens=True)

    # Compute reward score
    texts = [q + r for q, r in zip(batch["query"], batch["response"])]
    inputs = tokenizer(texts, padding=True, truncation=True, return_tensors="pt").to(ppo_trainer.accelerator.device)
    raw_rewards = ppo_trainer.accelerator.unwrap_model(ppo_trainer.model).compute_reward_score(**inputs)
    rewards = [raw_rewards[i, -1, 1] for i in range(len(raw_rewards))]  # take last token

    # Run PPO step
    stats = ppo_trainer.step(question_tensors, response_tensors, rewards)
    ppo_trainer.log_stats(stats, batch, rewards)
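

# Illustrative sketch (not called above; the helper name is hypothetical): the
# reward selection in the loop assumes `compute_reward_score` returns per-token
# scores of shape [batch, seq_len, 2] and keeps the positive-class score at the
# last token. This helper expresses the same selection in one place.
def extract_last_token_rewards(raw_rewards: torch.Tensor) -> list:
    """Take the last-token, positive-class score for each sample in the batch."""
    return [raw_rewards[i, -1, 1] for i in range(len(raw_rewards))]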
trl/examples/scripts/ppo_multi_adapter.py/0
{ "file_path": "trl/examples/scripts/ppo_multi_adapter.py", "repo_id": "trl", "token_count": 2129 }
454
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import itertools import tempfile import unittest import torch from accelerate.utils.memory import release_memory from datasets import load_dataset from parameterized import parameterized from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig from trl import SFTConfig, SFTTrainer, is_peft_available from trl.models.utils import setup_chat_format from ..testing_utils import ( require_bitsandbytes, require_liger_kernel, require_peft, require_torch_gpu, require_torch_multi_gpu, ) from .testing_constants import DEVICE_MAP_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS, MODELS_TO_TEST, PACKING_OPTIONS if is_peft_available(): from peft import LoraConfig, PeftModel @require_torch_gpu class SFTTrainerSlowTester(unittest.TestCase): def setUp(self): self.train_dataset = load_dataset("imdb", split="train[:10%]") self.eval_dataset = load_dataset("imdb", split="test[:10%]") self.dataset_text_field = "text" self.max_seq_length = 128 self.peft_config = LoraConfig( lora_alpha=16, lora_dropout=0.1, r=8, bias="none", task_type="CAUSAL_LM", ) def tearDown(self): gc.collect() torch.cuda.empty_cache() gc.collect() @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) def test_sft_trainer_str(self, model_name, packing): """ Simply tests if passing a simple str to `SFTTrainer` loads and runs the trainer as expected. """ with tempfile.TemporaryDirectory() as tmp_dir: args = SFTConfig( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, ) trainer = SFTTrainer( model_name, args=args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, ) trainer.train() @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) def test_sft_trainer_transformers(self, model_name, packing): """ Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer as expected. """ with tempfile.TemporaryDirectory() as tmp_dir: args = SFTConfig( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, ) model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, ) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) @require_peft def test_sft_trainer_peft(self, model_name, packing): """ Simply tests if passing a transformers model + peft config to `SFTTrainer` loads and runs the trainer as expected. 
""" with tempfile.TemporaryDirectory() as tmp_dir: args = SFTConfig( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, fp16=True, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, ) model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, peft_config=self.peft_config, ) assert isinstance(trainer.model, PeftModel) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) def test_sft_trainer_transformers_mp(self, model_name, packing): """ Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer as expected in mixed precision. """ with tempfile.TemporaryDirectory() as tmp_dir: args = SFTConfig( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, fp16=True, # this is sufficient to enable amp packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, ) model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, ) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS))) def test_sft_trainer_transformers_mp_gc(self, model_name, packing, gradient_checkpointing_kwargs): """ Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer as expected in mixed precision + different scenarios of gradient_checkpointing. """ with tempfile.TemporaryDirectory() as tmp_dir: args = SFTConfig( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, fp16=True, # this is sufficient to enable amp gradient_checkpointing=True, gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, ) model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, ) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS))) @require_peft def test_sft_trainer_transformers_mp_gc_peft(self, model_name, packing, gradient_checkpointing_kwargs): """ Simply tests if passing a transformers model + PEFT to `SFTTrainer` loads and runs the trainer as expected in mixed precision + different scenarios of gradient_checkpointing. 
""" with tempfile.TemporaryDirectory() as tmp_dir: args = SFTConfig( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, fp16=True, # this is sufficient to enable amp gradient_checkpointing=True, gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, ) model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, peft_config=self.peft_config, ) assert isinstance(trainer.model, PeftModel) trainer.train() release_memory(model, trainer) @parameterized.expand( list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS, DEVICE_MAP_OPTIONS)) ) @require_torch_multi_gpu def test_sft_trainer_transformers_mp_gc_device_map( self, model_name, packing, gradient_checkpointing_kwargs, device_map ): """ Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer as expected in mixed precision + different scenarios of gradient_checkpointing (single, multi-gpu, etc). """ with tempfile.TemporaryDirectory() as tmp_dir: args = SFTConfig( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, fp16=True, # this is sufficient to enable amp gradient_checkpointing=True, gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, ) model = AutoModelForCausalLM.from_pretrained(model_name, device_map=device_map) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, ) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS))) @require_peft @require_bitsandbytes def test_sft_trainer_transformers_mp_gc_peft_qlora(self, model_name, packing, gradient_checkpointing_kwargs): """ Simply tests if passing a transformers model + PEFT + bnb to `SFTTrainer` loads and runs the trainer as expected in mixed precision + different scenarios of gradient_checkpointing. 
""" with tempfile.TemporaryDirectory() as tmp_dir: args = SFTConfig( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, fp16=True, # this is sufficient to enable amp gradient_checkpointing=True, gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, ) quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16) model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=quantization_config) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, peft_config=self.peft_config, ) assert isinstance(trainer.model, PeftModel) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) @require_peft @require_bitsandbytes def test_sft_trainer_with_chat_format_qlora(self, model_name, packing): """ Simply tests if using setup_chat_format with a transformers model + peft + bnb config to `SFTTrainer` loads and runs the trainer as expected. """ with tempfile.TemporaryDirectory() as tmp_dir: train_dataset = load_dataset("trl-internal-testing/dolly-chatml-sft", split="train") args = SFTConfig( packing=packing, max_seq_length=self.max_seq_length, output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, fp16=True, ) quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16) model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=quantization_config) tokenizer = AutoTokenizer.from_pretrained(model_name) model, tokenizer = setup_chat_format(model, tokenizer) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=train_dataset, peft_config=self.peft_config, ) assert isinstance(trainer.model, PeftModel) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) @require_liger_kernel def test_sft_trainer_with_liger(self, model_name, packing): """ Tests if passing use_liger=True to SFTConfig loads and runs the trainer with AutoLigerKernelForCausalLM as expected. """ with tempfile.TemporaryDirectory() as tmp_dir: args = SFTConfig( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=2, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, use_liger=True, ) trainer = SFTTrainer( model_name, args=args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, ) # check that the components of the trainer.model are monkey patched: self.assertTrue(any("Liger" in type(module).__name__ for module in trainer.model.model.modules())) trainer.train() release_memory(trainer.model, trainer)
trl/tests/slow/test_sft_slow.py/0
{ "file_path": "trl/tests/slow/test_sft_slow.py", "repo_id": "trl", "token_count": 7851 }
455
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import torch from datasets import Dataset from parameterized import parameterized from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer from trl import KTOConfig, KTOTrainer from trl.trainer.kto_trainer import _get_kl_dataset, _process_tokens, _tokenize from .testing_utils import require_no_wandb, require_peft class KTOTrainerTester(unittest.TestCase): def setUp(self): self.model_id = "trl-internal-testing/dummy-GPT2-correct-vocab" self.model = AutoModelForCausalLM.from_pretrained(self.model_id) self.ref_model = AutoModelForCausalLM.from_pretrained(self.model_id) self.tokenizer = AutoTokenizer.from_pretrained(self.model_id) self.tokenizer.pad_token = self.tokenizer.eos_token # get t5 as seq2seq example: model_id = "trl-internal-testing/tiny-T5ForConditionalGeneration-correct-vocab" self.t5_model = AutoModelForSeq2SeqLM.from_pretrained(model_id) self.t5_ref_model = AutoModelForSeq2SeqLM.from_pretrained(model_id) self.t5_tokenizer = AutoTokenizer.from_pretrained(model_id) def _init_dummy_dataset(self): # fmt: off dummy_dataset_dict = { "prompt": [ "Hey, hello", "How are you", "What is your name?", "What is your name?", "Which is the best programming language?", "Which is the best programming language?", "Which is the best programming language?", ], "completion": [ "hi nice to meet you", "leave me alone", "I don't have a name", "My name is Mary", "Python", "C++", "Java", ], "label": [ True, False, False, True, True, False, False, ], } # fmt: on return Dataset.from_dict(dummy_dataset_dict) @parameterized.expand( [ ["gpt2", True, True], ["gpt2", True, False], # ["t5", True], ["gpt2", False, True], ["gpt2", False, False], # ["t5", False], ] ) def test_kto_trainer(self, name, pre_compute, eval_dataset): with tempfile.TemporaryDirectory() as tmp_dir: training_args = KTOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=1, learning_rate=9e-1, eval_strategy="steps", beta=0.1, precompute_ref_log_probs=pre_compute, report_to="none", ) dummy_dataset = self._init_dummy_dataset() if name == "gpt2": model = self.model ref_model = self.ref_model tokenizer = self.tokenizer elif name == "t5": model = self.t5_model ref_model = self.t5_ref_model tokenizer = self.t5_tokenizer trainer = KTOTrainer( model=model, ref_model=ref_model, args=training_args, tokenizer=tokenizer, train_dataset=dummy_dataset, eval_dataset=dummy_dataset if eval_dataset else None, ) previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} trainer.train() self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) # check the params have changed for n, param in previous_trainable_params.items(): new_param = trainer.model.get_parameter(n) # check the params have changed - ignore 0 biases if param.sum() != 0: self.assertFalse(torch.equal(param, new_param)) def 
test_tokenize_and_process_tokens(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = KTOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=1, learning_rate=9e-1, eval_strategy="steps", beta=0.1, report_to="none", ) dummy_dataset = self._init_dummy_dataset() trainer = KTOTrainer( model=self.model, ref_model=self.ref_model, args=training_args, tokenizer=self.tokenizer, train_dataset=dummy_dataset, eval_dataset=dummy_dataset, ) with tempfile.TemporaryDirectory() as tmp_dir: tokenized_dataset = dummy_dataset.map( _tokenize, fn_kwargs={"tokenizer": trainer.tokenizer}, batched=True, batch_size=2, ) self.assertListEqual(tokenized_dataset["prompt"], dummy_dataset["prompt"]) self.assertListEqual(tokenized_dataset["completion"], dummy_dataset["completion"]) self.assertListEqual(tokenized_dataset["label"], dummy_dataset["label"]) self.assertListEqual(tokenized_dataset["prompt_input_ids"][0], [10814, 11]) self.assertListEqual(tokenized_dataset["prompt_attention_mask"][0], [1, 1]) self.assertListEqual(tokenized_dataset["answer_input_ids"][0], [5968, 1219, 72, 3621, 284, 1826, 345]) self.assertListEqual(tokenized_dataset["answer_attention_mask"][0], [1, 1, 1, 1, 1, 1, 1]) # Test corruption of (prompt, completion) pairs for KL dataset for batch_size in [2, 3]: tokenized_kl_dataset = tokenized_dataset.map(_get_kl_dataset, batched=True, batch_size=batch_size) # Verify that the "answer_input_ids" have been modified, meaning the new "answer_input_ids" differ # from the original ones. However, when the length of the dataset modulo batch_size equals 1, # the last batch remains unaltered. This is a rare scenario that does not impact the training # process, so we exclude it from testing by iterating only up to len - 1. 
for i in range(len(tokenized_kl_dataset["answer_input_ids"]) - 1): self.assertListEqual( tokenized_dataset["prompt_input_ids"][i], tokenized_kl_dataset["prompt_input_ids"][i], ) self.assertListEqual( tokenized_dataset["prompt_attention_mask"][i], tokenized_kl_dataset["prompt_attention_mask"][i], ) self.assertNotEqual( tokenized_dataset["answer_input_ids"][i], tokenized_kl_dataset["answer_input_ids"][i], ) self.assertNotEqual( tokenized_dataset["answer_attention_mask"][i], tokenized_kl_dataset["answer_attention_mask"][i], ) fn_kwargs = { "prefix": "", "is_encoder_decoder": trainer.is_encoder_decoder, "tokenizer": trainer.tokenizer, "max_length": trainer.max_length, "truncation_mode": trainer.truncation_mode, "label_pad_token_id": trainer.label_pad_token_id, "max_prompt_length": trainer.max_prompt_length, } processed_dataset = tokenized_dataset.map(_process_tokens, fn_kwargs=fn_kwargs, num_proc=2) self.assertListEqual(processed_dataset["prompt"], dummy_dataset["prompt"]) self.assertListEqual(processed_dataset["completion"], dummy_dataset["completion"]) self.assertListEqual(processed_dataset["label"], dummy_dataset["label"]) self.assertListEqual(processed_dataset["prompt_input_ids"][0], [50256, 10814, 11]) self.assertListEqual(processed_dataset["prompt_attention_mask"][0], [1, 1, 1]) self.assertListEqual( processed_dataset["completion_input_ids"][0], [50256, 10814, 11, 5968, 1219, 72, 3621, 284, 1826, 345, 50256], ) self.assertListEqual( processed_dataset["completion_attention_mask"][0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ) self.assertListEqual( processed_dataset["completion_labels"][0], [-100, -100, -100, 5968, 1219, 72, 3621, 284, 1826, 345, 50256], ) def test_kto_trainer_without_providing_ref_model(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = KTOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=4, learning_rate=9e-1, eval_strategy="steps", beta=0.1, report_to="none", ) dummy_dataset = self._init_dummy_dataset() trainer = KTOTrainer( model=self.model, ref_model=None, args=training_args, tokenizer=self.tokenizer, train_dataset=dummy_dataset, eval_dataset=dummy_dataset, ) previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} trainer.train() self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) # check the params have changed for n, param in previous_trainable_params.items(): new_param = trainer.model.get_parameter(n) # check the params have changed - ignore 0 biases if param.sum() != 0: self.assertFalse(torch.equal(param, new_param)) @require_peft def test_kto_trainer_without_providing_ref_model_with_lora(self): from peft import LoraConfig lora_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) with tempfile.TemporaryDirectory() as tmp_dir: training_args = KTOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=4, learning_rate=9e-1, eval_strategy="steps", beta=0.1, report_to="none", ) dummy_dataset = self._init_dummy_dataset() trainer = KTOTrainer( model=self.model, ref_model=None, args=training_args, tokenizer=self.tokenizer, train_dataset=dummy_dataset, eval_dataset=dummy_dataset, peft_config=lora_config, ) previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} trainer.train() self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) # check the params have changed 
for n, param in previous_trainable_params.items(): if "lora" in n: new_param = trainer.model.get_parameter(n) # check the params have changed - ignore 0 biases if param.sum() != 0: self.assertFalse(torch.equal(param, new_param)) @require_no_wandb def test_kto_trainer_generate_during_eval_no_wandb(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = KTOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=1, learning_rate=9e-1, eval_strategy="steps", beta=0.1, generate_during_eval=True, report_to="none", ) dummy_dataset = self._init_dummy_dataset() with self.assertRaisesRegex( ValueError, expected_regex="`generate_during_eval=True` requires Weights and Biases to be installed." " Please install with `pip install wandb` to resolve.", ): KTOTrainer( model=self.model, ref_model=None, args=training_args, tokenizer=self.tokenizer, train_dataset=dummy_dataset, eval_dataset=dummy_dataset, ) @require_peft def test_kto_lora_save(self): from peft import LoraConfig, get_peft_model lora_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) # lora model model = AutoModelForCausalLM.from_pretrained(self.model_id) model_peft = get_peft_model(model, lora_config) with tempfile.TemporaryDirectory() as tmp_dir: training_args = KTOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=4, learning_rate=9e-1, eval_strategy="steps", beta=0.1, report_to="none", ) dummy_dataset = self._init_dummy_dataset() # kto train lora model with a lora config trainer = KTOTrainer( model=model_peft, ref_model=None, args=training_args, tokenizer=self.tokenizer, train_dataset=dummy_dataset, eval_dataset=dummy_dataset, peft_config=lora_config, ) # train the model trainer.train() # save peft adapter trainer.save_model() # assert that the model is loaded without giving OSError try: AutoModelForCausalLM.from_pretrained(tmp_dir) except OSError: self.fail("Loading the saved peft adapter failed")
trl/tests/test_kto_trainer.py/0
{ "file_path": "trl/tests/test_kto_trainer.py", "repo_id": "trl", "token_count": 8590 }
456
# flake8: noqa __version__ = "0.11.0.dev0" from typing import TYPE_CHECKING from .import_utils import _LazyModule, is_diffusers_available, OptionalDependencyNotAvailable _import_structure = { "core": [ "set_seed", ], "environment": [ "TextEnvironment", "TextHistory", ], "extras": [ "BestOfNSampler", ], "import_utils": [ "is_bitsandbytes_available", "is_diffusers_available", "is_npu_available", "is_peft_available", "is_pil_available", "is_wandb_available", "is_xpu_available", "is_llmblender_available", "is_openai_available", "is_liger_available", ], "models": [ "AutoModelForCausalLMWithValueHead", "AutoModelForSeq2SeqLMWithValueHead", "PreTrainedModelWrapper", "create_reference_model", "setup_chat_format", "SUPPORTED_ARCHITECTURES", ], "trainer": [ "DataCollatorForCompletionOnlyLM", "DPOConfig", "DPOTrainer", "CPOConfig", "CPOTrainer", "AlignPropConfig", "AlignPropTrainer", "IterativeSFTTrainer", "KTOConfig", "KTOTrainer", "BCOConfig", "BCOTrainer", "ModelConfig", "OnlineDPOConfig", "OnlineDPOTrainer", "ORPOConfig", "ORPOTrainer", "PPOConfig", "PPOTrainer", "RewardConfig", "RewardTrainer", "SFTConfig", "SFTTrainer", "FDivergenceConstants", "FDivergenceType", "WinRateCallback", "BaseJudge", "BaseRankJudge", "BasePairwiseJudge", "RandomRankJudge", "RandomPairwiseJudge", "PairRMJudge", "HfPairwiseJudge", "OpenAIPairwiseJudge", ], "commands": [], "commands.cli_utils": ["init_zero_verbose", "SFTScriptArguments", "DPOScriptArguments", "TrlParser"], "trainer.callbacks": ["RichProgressCallback", "SyncRefModelCallback"], "trainer.utils": ["get_kbit_device_map", "get_peft_config", "get_quantization_config"], "multitask_prompt_tuning": [ "MultitaskPromptEmbedding", "MultitaskPromptTuningConfig", "MultitaskPromptTuningInit", ], } try: if not is_diffusers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["models"].extend( [ "DDPOPipelineOutput", "DDPOSchedulerOutput", "DDPOStableDiffusionPipeline", "DefaultDDPOStableDiffusionPipeline", ] ) _import_structure["trainer"].extend(["DDPOConfig", "DDPOTrainer"]) if TYPE_CHECKING: from .core import set_seed from .environment import TextEnvironment, TextHistory from .extras import BestOfNSampler from .import_utils import ( is_bitsandbytes_available, is_diffusers_available, is_npu_available, is_peft_available, is_pil_available, is_wandb_available, is_xpu_available, is_llmblender_available, is_openai_available, is_liger_available, ) from .models import ( AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead, PreTrainedModelWrapper, create_reference_model, setup_chat_format, SUPPORTED_ARCHITECTURES, ) from .trainer import ( DataCollatorForCompletionOnlyLM, DPOConfig, DPOTrainer, CPOConfig, CPOTrainer, AlignPropConfig, AlignPropTrainer, IterativeSFTTrainer, KTOConfig, KTOTrainer, BCOConfig, BCOTrainer, ModelConfig, OnlineDPOConfig, OnlineDPOTrainer, ORPOConfig, ORPOTrainer, PPOConfig, PPOTrainer, RewardConfig, RewardTrainer, SFTConfig, SFTTrainer, FDivergenceConstants, FDivergenceType, WinRateCallback, BaseJudge, BaseRankJudge, BasePairwiseJudge, RandomRankJudge, RandomPairwiseJudge, PairRMJudge, HfPairwiseJudge, OpenAIPairwiseJudge, ) from .trainer.callbacks import RichProgressCallback, SyncRefModelCallback from .trainer.utils import get_kbit_device_map, get_peft_config, get_quantization_config from .commands.cli_utils import init_zero_verbose, SFTScriptArguments, DPOScriptArguments, TrlParser try: if not is_diffusers_available(): raise OptionalDependencyNotAvailable() except 
OptionalDependencyNotAvailable: pass else: from .models import ( DDPOPipelineOutput, DDPOSchedulerOutput, DDPOStableDiffusionPipeline, DefaultDDPOStableDiffusionPipeline, ) from .trainer import DDPOConfig, DDPOTrainer else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, extra_objects={"__version__": __version__}, )
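
# Note on the pattern above (a descriptive sketch, not part of the public API):
# because `sys.modules[__name__]` is replaced with a `_LazyModule`, a statement
# such as `from trl import SFTTrainer` resolves the name through
# `_import_structure` and only imports the `trl.trainer` submodule on first
# attribute access, so `import trl` stays cheap even when optional extras such
# as `diffusers` are not installed.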
trl/trl/__init__.py/0
{ "file_path": "trl/trl/__init__.py", "repo_id": "trl", "token_count": 2521 }
457
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn as nn from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM from ..import_utils import is_npu_available, is_xpu_available from .modeling_base import PreTrainedModelWrapper class ValueHead(nn.Module): r""" The ValueHead class implements a head for GPT2 that returns a scalar for each output token. """ def __init__(self, config, **kwargs): super().__init__() if not hasattr(config, "summary_dropout_prob"): summary_dropout_prob = kwargs.pop("summary_dropout_prob", 0.1) else: summary_dropout_prob = config.summary_dropout_prob self.dropout = nn.Dropout(summary_dropout_prob) if summary_dropout_prob else nn.Identity() # some models such as OPT have a projection layer before the word embeddings - e.g. OPT-350m if hasattr(config, "hidden_size"): hidden_size = config.hidden_size if hasattr(config, "word_embed_proj_dim"): hidden_size = config.word_embed_proj_dim elif hasattr(config, "is_encoder_decoder"): if config.is_encoder_decoder and hasattr(config, "decoder"): if hasattr(config.decoder, "hidden_size"): hidden_size = config.decoder.hidden_size self.summary = nn.Linear(hidden_size, 1) self.flatten = nn.Flatten() def forward(self, hidden_states): output = self.dropout(hidden_states) # For now force upcast in fp32 if needed. Let's keep the # output in fp32 for numerical stability. if output.dtype != self.summary.weight.dtype: output = output.to(self.summary.weight.dtype) output = self.summary(output) return output class AutoModelForCausalLMWithValueHead(PreTrainedModelWrapper): r""" An autoregressive model with a value head in addition to the language model head. This class inherits from `~trl.PreTrainedModelWrapper` and wraps a `transformers.PreTrainedModel` class. The wrapper class supports classic functions such as `from_pretrained`, `push_to_hub` and `generate`. To call a method of the wrapped model, simply manipulate the `pretrained_model` attribute of this class. Class attributes: - **transformers_parent_class** (`transformers.PreTrainedModel`) -- The parent class of the wrapped model. This should be set to `transformers.AutoModelForCausalLM` for this class. - **lm_head_namings** (`tuple`) -- A tuple of strings that are used to identify the language model head of the wrapped model. This is set to `("lm_head", "embed_out")` for this class but can be changed for other models in the future - **supported_args** (`tuple`) -- A tuple of strings that are used to identify the arguments that are supported by the `ValueHead` class. Currently, the supported args are: - **summary_dropout_prob** (`float`, `optional`, defaults to `None`) -- The dropout probability for the `ValueHead` class. - **v_head_initializer_range** (`float`, `optional`, defaults to `0.2`) -- The initializer range for the `ValueHead` if a specific initialization strategy is selected. - **v_head_init_strategy** (`str`, `optional`, defaults to `None`) -- The initialization strategy for the `ValueHead`. 
        Currently, the supported strategies are:
        - **`None`** -- Initializes the weights of the `ValueHead` with a random distribution. This is the default
          strategy.
        - **"normal"** -- Initializes the weights of the `ValueHead` with a normal distribution.
    """

    transformers_parent_class = AutoModelForCausalLM
    lm_head_namings = ["lm_head", "embed_out"]
    supported_args = (
        "summary_dropout_prob",
        "v_head_initializer_range",
        "v_head_init_strategy",
    )

    def __init__(self, pretrained_model, **kwargs):
        r"""
        Initializes the model.

        Args:
            pretrained_model (`transformers.PreTrainedModel`):
                The model to wrap. It should be a causal language model such as GPT2,
                or any model mapped inside the `AutoModelForCausalLM` class.
            kwargs (`dict`, `optional`):
                Additional keyword arguments that are passed to the `ValueHead` class.
        """
        super().__init__(pretrained_model, **kwargs)
        v_head_kwargs, _, _ = self._split_kwargs(kwargs)

        if not any(hasattr(self.pretrained_model, attribute) for attribute in self.lm_head_namings):
            raise ValueError("The model does not have a language model head, please use a model that has one.")

        self.v_head = ValueHead(self.pretrained_model.config, **v_head_kwargs)

        self._init_weights(**v_head_kwargs)

    def _init_weights(self, **kwargs):
        r"""
        Initializes the weights of the value head. The default initialization strategy is random.
        Users can pass a different initialization strategy by passing the `v_head_init_strategy`
        argument when calling `.from_pretrained`. Supported strategies are:
        - `normal`: initializes the weights with a normal distribution.

        Args:
            **kwargs (`dict`, `optional`):
                Additional keyword arguments that are passed to the `ValueHead` class. These arguments
                can contain the `v_head_init_strategy` argument as well as the `v_head_initializer_range`
                argument.
        """
        initializer_range = kwargs.pop("v_head_initializer_range", 0.2)
        # random init by default
        init_strategy = kwargs.pop("v_head_init_strategy", None)
        if init_strategy is None:
            # do nothing
            pass
        elif init_strategy == "normal":
            self.v_head.summary.weight.data.normal_(mean=0.0, std=initializer_range)
            self.v_head.summary.bias.data.zero_()

    def forward(
        self,
        input_ids=None,
        past_key_values=None,
        attention_mask=None,
        return_past_key_values=False,
        **kwargs,
    ):
        r"""
        Applies a forward pass to the wrapped model and returns the logits of the value head.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary.
            past_key_values (`tuple(tuple(torch.FloatTensor))`, `optional`):
                Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
                (see `past_key_values` input) to speed up sequential decoding.
            attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, `optional`):
                Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
            return_past_key_values (bool): A flag indicating if the computed hidden-states should be returned.
            kwargs (`dict`, `optional`):
                Additional keyword arguments that are passed to the wrapped model.
""" kwargs["output_hidden_states"] = True # this had already been set in the LORA / PEFT examples kwargs["past_key_values"] = past_key_values if self.is_peft_model and self.pretrained_model.active_peft_config.peft_type == "PREFIX_TUNING": kwargs.pop("past_key_values") base_model_output = self.pretrained_model( input_ids=input_ids, attention_mask=attention_mask, **kwargs, ) last_hidden_state = base_model_output.hidden_states[-1] lm_logits = base_model_output.logits loss = base_model_output.loss if last_hidden_state.device != self.v_head.summary.weight.device: last_hidden_state = last_hidden_state.to(self.v_head.summary.weight.device) value = self.v_head(last_hidden_state).squeeze(-1) # force upcast in fp32 if logits are in half-precision if lm_logits.dtype != torch.float32: lm_logits = lm_logits.float() if return_past_key_values: return (lm_logits, loss, value, base_model_output.past_key_values) else: return (lm_logits, loss, value) def generate(self, *args, **kwargs): r""" A simple wrapper around the `generate` method of the wrapped model. Please refer to the [`generate`](https://huggingface.co/docs/transformers/internal/generation_utils) method of the wrapped model for more information about the supported arguments. Args: *args (`list`, *optional*): Positional arguments passed to the `generate` method of the wrapped model. **kwargs (`dict`, *optional*): Keyword arguments passed to the `generate` method of the wrapped model. """ return self.pretrained_model.generate(*args, **kwargs) def state_dict(self, *args, **kwargs): r""" Returns the state dictionary of the model. We add the state dictionary of the value head to the state dictionary of the wrapped model by prepending the key with `v_head.`. """ if not self.is_peft_model: pretrained_model_state_dict = self.pretrained_model.state_dict(*args, **kwargs) else: # if it is a peft model, only save the v_head pretrained_model_state_dict = {} v_head_state_dict = self.v_head.state_dict(*args, **kwargs) for k, v in v_head_state_dict.items(): pretrained_model_state_dict[f"v_head.{k}"] = v return pretrained_model_state_dict def push_to_hub(self, *args, **kwargs): self.pretrained_model.v_head = self.v_head return self.pretrained_model.push_to_hub(*args, **kwargs) def post_init(self, state_dict): r""" We add the state dictionary of the value head to the state dictionary of the wrapped model by prepending the key with `v_head.`. This function removes the `v_head.` prefix from the keys of the value head state dictionary. """ for k in list(state_dict.keys()): if "v_head." in k: state_dict[k.replace("v_head.", "")] = state_dict.pop(k) self.v_head.load_state_dict(state_dict, strict=False) del state_dict if hasattr(self.pretrained_model, "hf_device_map"): if ( "cpu" in self.pretrained_model.hf_device_map.values() or "disk" in self.pretrained_model.hf_device_map.values() ): raise ValueError( "The model is offloaded on CPU or disk - CPU & disk offloading is not supported for ValueHead models." 
        )

        first_device = list(set(self.pretrained_model.hf_device_map.values()))[0]
        if isinstance(first_device, int):
            if is_npu_available():
                first_device = f"npu:{first_device}"
            elif is_xpu_available():
                first_device = f"xpu:{first_device}"
            else:
                first_device = f"cuda:{first_device}"
        self.v_head = self.v_head.to(first_device)

        def set_device_hook(module, input, outputs):
            new_output = ()
            for output in outputs:
                if isinstance(output, torch.Tensor):
                    new_output += (output.to(first_device),)
                else:
                    new_output += (output,)
            return new_output

        self.register_forward_hook(set_device_hook)

        self.is_sequential_parallel = True


class AutoModelForSeq2SeqLMWithValueHead(PreTrainedModelWrapper):
    r"""
    A seq2seq model with a value head in addition to the language model head.
    This class inherits from `~trl.PreTrainedModelWrapper` and wraps a
    `transformers.PreTrainedModel` class. The wrapper class supports classic functions
    such as `from_pretrained` and `push_to_hub` and also provides some additional
    functionalities such as `generate`.

    Args:
        pretrained_model (`transformers.PreTrainedModel`):
            The model to wrap. It should be a seq2seq model such as T5, or any model
            mapped inside the `AutoModelForSeq2SeqLM` class.
        kwargs:
            Additional keyword arguments passed along to the `ValueHead` class.
    """

    transformers_parent_class = AutoModelForSeq2SeqLM
    lm_head_namings = ["lm_head", "embed_out", "output_projection"]
    supported_args = (
        "summary_dropout_prob",
        "v_head_initializer_range",
        "v_head_init_strategy",
    )

    def __init__(self, pretrained_model, **kwargs):
        super().__init__(pretrained_model, **kwargs)
        v_head_kwargs, _, _ = self._split_kwargs(kwargs)
        self.is_encoder_decoder = True

        if not self._has_lm_head():
            raise ValueError("The model does not have a language model head, please use a model that has one.")

        self.v_head = ValueHead(self.pretrained_model.config, **v_head_kwargs)

        self._init_weights(**v_head_kwargs)

    def _has_lm_head(self):
        # check module names of all modules inside `pretrained_model` to find the language model head
        for name, _module in self.pretrained_model.named_modules():
            if any(attribute in name for attribute in self.lm_head_namings):
                return True
        return False

    def post_init(self, state_dict):
        r"""
        We add the state dictionary of the value head to the state dictionary of the wrapped model
        by prepending the key with `v_head.`. This function removes the `v_head.` prefix from the
        keys of the value head state dictionary.
        """
        for k in list(state_dict.keys()):
            if "v_head." in k:
                state_dict[k.replace("v_head.", "")] = state_dict.pop(k)
        self.v_head.load_state_dict(state_dict, strict=False)
        del state_dict

        if hasattr(self.pretrained_model, "hf_device_map"):
            if (
                "cpu" in self.pretrained_model.hf_device_map.values()
                or "disk" in self.pretrained_model.hf_device_map.values()
            ):
                raise ValueError(
                    "The model is offloaded on CPU or disk - CPU & disk offloading is not supported for ValueHead models."
                )

            # get the lm_head device
            for name, module in self.pretrained_model.named_modules():
                if any(attribute in name for attribute in self.lm_head_namings):
                    lm_head_device = module.weight.device
                    break

            # put v_head on the same device as the lm_head to avoid issues
            self.v_head = self.v_head.to(lm_head_device)

            def set_device_hook(module, input, outputs):
                r"""
                A hook that sets the device of the output of the model to the device of the first
                parameter of the model.

                Args:
                    module (`nn.Module`):
                        The module to which the hook is attached.
                    input (`tuple`):
                        The input to the module.
                    outputs (`tuple`):
                        The output of the module.
""" new_output = () for output in outputs: if isinstance(output, torch.Tensor): new_output += (output.to(lm_head_device),) else: new_output += (output,) return new_output self.register_forward_hook(set_device_hook) self.is_sequential_parallel = True def state_dict(self, *args, **kwargs): r""" Returns the state dictionary of the model. We add the state dictionary of the value head to the state dictionary of the wrapped model by prepending the key with `v_head.`. """ if not self.is_peft_model: pretrained_model_state_dict = self.pretrained_model.state_dict(*args, **kwargs) else: # if it is a peft model, only save the v_head pretrained_model_state_dict = {} v_head_state_dict = self.v_head.state_dict(*args, **kwargs) for k, v in v_head_state_dict.items(): pretrained_model_state_dict[f"v_head.{k}"] = v return pretrained_model_state_dict def push_to_hub(self, *args, **kwargs): self.pretrained_model.v_head = self.v_head return self.pretrained_model.push_to_hub(*args, **kwargs) def _init_weights(self, **kwargs): r""" We initialize the weights of the value head. """ initializer_range = kwargs.pop("v_head_initializer_range", 0.2) # random init by default init_strategy = kwargs.pop("v_head_init_strategy", None) if init_strategy is None: # do nothing pass elif init_strategy == "normal": self.v_head.summary.weight.data.normal_(mean=0.0, std=initializer_range) self.v_head.summary.bias.data.zero_() def forward( self, input_ids=None, past_key_values=None, attention_mask=None, return_past_key_values=False, **kwargs, ): kwargs["past_key_values"] = past_key_values if self.is_peft_model and self.pretrained_model.active_peft_config.peft_type == "PREFIX_TUNING": kwargs.pop("past_key_values") base_model_output = self.pretrained_model( input_ids=input_ids, attention_mask=attention_mask, output_hidden_states=True, # We force the model to output hidden states **kwargs, ) last_hidden_state = base_model_output.decoder_hidden_states[-1] lm_logits = base_model_output.logits loss = base_model_output.loss value = self.v_head(last_hidden_state).squeeze(-1) # force upcast in fp32 if logits are in half-precision if lm_logits.dtype != torch.float32: lm_logits = lm_logits.float() if return_past_key_values: return (lm_logits, loss, value, base_model_output.past_key_values) else: return (lm_logits, loss, value) def generate(self, *args, **kwargs): r""" We call `generate` on the wrapped model. """ return self.pretrained_model.generate(*args, **kwargs)
trl/trl/models/modeling_value_head.py/0
{ "file_path": "trl/trl/models/modeling_value_head.py", "repo_id": "trl", "token_count": 8388 }
458
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from functools import wraps
from typing import Callable, Dict, List, Optional, Tuple, Union

import torch
from datasets import Dataset
from torch.utils.data import DataLoader
from transformers import (
    DataCollator,
    DataCollatorForLanguageModeling,
    DataCollatorForSeq2Seq,
    PreTrainedModel,
    PreTrainedTokenizerBase,
    Trainer,
    TrainingArguments,
)
from transformers.trainer_utils import EvalLoopOutput

from ..core import PPODecorators
from ..import_utils import is_peft_available
from .utils import trl_sanitze_kwargs_for_tagging


if is_peft_available():
    from peft import PeftModel


class IterativeSFTTrainer(Trainer):
    """
    The IterativeSFTTrainer can be used to finetune models with methods that require some steps between optimization steps.

    Attributes:
        **model** (`PreTrainedModel`) -- Model to be optimized, either an 'AutoModelForCausalLM' or an 'AutoModelForSeq2SeqLM'.
            Check the documentation of `PreTrainedModel` for more details.
        **args** (`transformers.TrainingArguments`): -- The arguments to use for training.
        **tokenizer** (`PreTrainedTokenizerBase`) -- Tokenizer to be used for encoding the data. Check the documentation of
            `transformers.PreTrainedTokenizer` and `transformers.PreTrainedTokenizerFast` for more details.
        **optimizers** (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`): -- The optimizer and scheduler to use for training.
        **data_collator** (Union[DataCollatorForLanguageModeling, DataCollatorForSeq2Seq], *optional*) -- Data collator to be used for training and
            passed along the dataloader.
        **eval_dataset** (`datasets.Dataset`): The dataset to use for evaluation.
        **max_length** (`int`, defaults to `None`): -- The maximum length of the input.
        **truncation_mode** (`str`, defaults to `keep_end`): -- The truncation mode to use, either `keep_end` or `keep_start`.
        **preprocess_logits_for_metrics** (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`): -- The function to use to preprocess the logits before computing the metrics.
        **compute_metrics** (`Callable[[EvalPrediction], Dict]`, *optional*): -- The function to use to compute the metrics. Must take an `EvalPrediction` and return a
            dictionary mapping strings to metric values.
        **optimize_device_cache** (`bool`, *optional*, defaults to `False`) -- Optimize CUDA cache for slightly more memory-efficient training.
""" _tag_names = ["trl", "iterative-sft"] def __init__( self, model: Optional[PreTrainedModel] = None, args: Optional[TrainingArguments] = None, tokenizer: Optional[PreTrainedTokenizerBase] = None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = ( None, None, ), data_collator: Optional[DataCollator] = None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None, max_length: Optional[int] = None, truncation_mode: Optional[str] = "keep_end", preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, compute_metrics: Optional[Callable[[EvalLoopOutput], Dict]] = None, optimize_device_cache: Optional[bool] = False, ): # Step 0: check positional arguments validity if not isinstance(tokenizer, (PreTrainedTokenizerBase)): raise ValueError( f"tokenizer must be a PreTrainedTokenizerBase like a PreTrainedTokenizer or a PreTrainedTokenizerFast, got {type(tokenizer)}" ) if not isinstance(model, PreTrainedModel): raise ValueError(f"model must be a PreTrainedModel, got {type(model)}") if not model.can_generate(): warnings.warn( f"The current model class {type(model)} is not compatible with `.generate()`" "Please make sure that this is intended." ) if optimizers[1] is None and args.max_steps == -1: raise ValueError( "When no scheduler is provided, you need to set the total number of training steps to perform `max_steps`" ) self.is_encoder_decoder = getattr(model.config, "is_encoder_decoder", False) self.is_peft_model = is_peft_available() and isinstance(model, PeftModel) self.tokenizer = tokenizer if data_collator is None: if self.is_encoder_decoder: warnings.warn( "No data collator is provided. Using 'DataCollatorForSeq2Seq' with" "'labels_pad_token_id' set to '-100' and 'pad_to_multiple_of' set to 8." ) self.data_collator = DataCollatorForSeq2Seq(tokenizer, label_pad_token_id=-100, pad_to_multiple_of=8) else: warnings.warn("No data collator is provided. Using 'DataCollatorForLanguageModeling'") self.data_collator = DataCollatorForLanguageModeling(self.tokenizer, mlm=False) else: self.data_collator = data_collator self.max_length = max_length self.truncation_mode = truncation_mode self.optimize_device_cache = optimize_device_cache super().__init__( model=model, args=args, data_collator=self.data_collator, eval_dataset=eval_dataset, tokenizer=tokenizer, compute_metrics=compute_metrics, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics, ) self.create_optimizer_and_scheduler(self.args.max_steps) # prepare model, optimizer and lr_scheduler self.model, self.optimizer, self.lr_scheduler = self.accelerator.prepare( self.model, self.optimizer, self.lr_scheduler ) self.tokenizer.truncation_side = "left" if self.truncation_mode == "keep_end" else "right" if not hasattr(self, "accelerator"): raise AttributeError( "Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`." 
            )

        PPODecorators.optimize_device_cache = self.optimize_device_cache

    def prepare_model_inputs(self, input_ids: torch.Tensor, attention_mask: torch.Tensor, labels: torch.Tensor):
        if attention_mask is None:
            attention_mask = [torch.ones_like(ids) for ids in input_ids]

        if self.is_encoder_decoder:
            input_data = self.data_collator(
                [
                    {"input_ids": ids, "attention_mask": att, "labels": lab}
                    for ids, att, lab in zip(input_ids, attention_mask, labels)
                ]
            ).to(self.model.device)

            input_data.pop("decoder_input_ids", None)  # This is directly computed inside the model

            input_data["labels"][input_data["labels"] == self.tokenizer.pad_token_id] = -100

        else:
            input_data = self.data_collator(
                [{"input_ids": ids, "attention_mask": att} for ids, att in zip(input_ids, attention_mask)]
            ).to(self.model.device)

        # truncate along the sequence dimension in case the user has provided input_ids,
        # attention_mask and labels (the collated tensors have shape [batch, seq])
        if self.max_length is not None:
            if self.truncation_mode == "keep_start":
                input_data = {k: v[:, : self.max_length] for k, v in input_data.items()}
            elif self.truncation_mode == "keep_end":
                input_data = {k: v[:, -self.max_length :] for k, v in input_data.items()}
            else:
                raise ValueError(f"Unknown truncation mode: {self.truncation_mode}")

        return input_data

    @staticmethod
    def _step_safety_checker(
        input_ids: List[torch.LongTensor],
        attention_mask: List[torch.LongTensor],
        labels: List[torch.LongTensor],
        texts: List[str],
        texts_labels: List[str],
    ):
        """
        Check if the input data is valid for training.

        Args:
            input_ids (List[`torch.LongTensor`]):
                List of tensors containing the input_ids
            attention_mask (List[`torch.LongTensor`]):
                List of tensors containing the attention_mask
            labels (List[`torch.LongTensor`]):
                List of tensors containing the labels
            texts (List[`str`]):
                List of strings containing the text input.
            texts_labels (List[`str`]):
                List of strings containing the text labels.

        Returns:
            `tuple`: The input data.
""" if texts is None: if attention_mask is None: for name, tensor_list in zip(["input_ids", "labels"], [input_ids, labels]): if not isinstance(tensor_list, list): raise ValueError(f"{name} must be a list of tensors - got {type(tensor_list)}") if not isinstance(tensor_list[0], torch.Tensor): raise ValueError(f"Elements in {name} must be tensors - got {type(tensor_list[0])}") else: for name, tensor_list in zip( ["input_ids", "attention_mask", "labels"], [input_ids, attention_mask, labels] ): if not isinstance(tensor_list, list): raise ValueError(f"{name} must be a list of tensors - got {type(tensor_list)}") if not isinstance(tensor_list[0], torch.Tensor): raise ValueError(f"Elements in {name} must be tensors - got {type(tensor_list[0])}") else: if not isinstance(texts, list): raise ValueError(f"'text' must be a list of strings - got {type(texts)}") if not isinstance(texts[0], str): raise ValueError(f"Elements in 'text' must be strings - got {type(texts[0])}") if texts_labels is not None: if not isinstance(texts_labels, list): raise ValueError(f"'text_labels' must be a list of strings - got {type(texts_labels)}") if not isinstance(texts_labels[0], str): raise ValueError(f"Elements in 'text_labels' must be strings - got {type(texts_labels[0])}") return input_ids, attention_mask, labels, texts, texts_labels @PPODecorators.empty_device_cache() def step( self, input_ids: Optional[List[torch.LongTensor]] = None, attention_mask: Optional[List[torch.LongTensor]] = None, labels: Optional[List[torch.LongTensor]] = None, texts: Optional[List[str]] = None, texts_labels: Optional[List[str]] = None, ): """ Run an optimisation step given a list of input_ids, attention_mask, and labels or a list of text and text_labels. Args: input_ids (List[`torch.LongTensor`]): List of tensors containing the input_ids (if not provided, text will be used) attention_mask (List[`torch.LongTensor`], , *optional*): List of tensors containing the attention_mask labels (List[`torch.FloatTensor`], *optional*): List of tensors containing the labels (if set to None, will default to input_ids) texts (List[`str`], *optional*): List of strings containing the text input (if not provided, input_ids will directly be used) texts_labels (List[`str`], *optional*): List of strings containing the text labels (if set to None, will default to text) Returns: `dict[str, Any]`: A summary of the training statistics """ self.model.train() if self.state.global_step == 0: self.tr_loss = torch.tensor(0.0).to(self.args.device) self._globalstep_last_logged = self.state.global_step if input_ids is None and texts is None: raise ValueError("Step should include `input_ids` or `texts` as keyword arguments.") elif input_ids is not None and texts is not None: warnings.warn( "Both 'input_ids' and 'texts' are provided. 'input_ids' will be overwritten using inputs provided by the 'texts' keyword argument." ) if labels is None and texts_labels is None and self.is_encoder_decoder: raise ValueError( "No 'labels' or 'text_labels' are provided. When using an encoder-decoder architecture, 'labels' or 'text_labels' must be passed." 
            )

        input_ids, attention_mask, labels, texts, texts_labels = self._step_safety_checker(
            input_ids, attention_mask, labels, texts, texts_labels
        )

        if texts is not None:
            model_inputs = self.tokenizer(
                texts, max_length=self.max_length, truncation=True, padding=True, return_tensors="pt"
            )

            input_ids, attention_mask = model_inputs["input_ids"], model_inputs["attention_mask"]

        if texts_labels is not None:
            # tokenize the label texts (not the inputs) to build the labels
            labels = self.tokenizer(
                texts_labels, max_length=self.max_length, truncation=True, padding=True, return_tensors="pt"
            )["input_ids"]

        if labels is None:
            warnings.warn("No labels are provided. Setting labels to input_ids")
            labels = input_ids

        model_inputs = self.prepare_model_inputs(input_ids, attention_mask, labels)

        model_inputs_names = list(model_inputs.keys())

        batch_dict = {}
        batch_dict.update(model_inputs)

        def collator(data):
            return_dict = {}
            for key in data[0]:
                if key in ["input_ids", "attention_mask", "labels"]:
                    return_dict[key] = torch.stack([d[key] for d in data]).to(self.model.device)
            return return_dict

        batch_data = Dataset.from_dict(batch_dict)
        batch_data.set_format("torch")

        step_dataloader = DataLoader(
            batch_data,
            batch_size=self.args.per_device_train_batch_size,
            shuffle=True,
            collate_fn=collator,
        )

        for _, batch in enumerate(step_dataloader):
            with self.accelerator.accumulate(self.model):
                model_inputs = {k: batch[k] for k in model_inputs_names}
                loss = self.compute_loss(self.model, model_inputs)

                if self.args.n_gpu > 1:
                    loss = loss.mean()

                tr_loss_step = loss.detach()

                self.accelerator.backward(loss)

                if self.accelerator.sync_gradients and self.args.max_grad_norm is not None:
                    self.accelerator.clip_grad_norm_(
                        self.model.parameters(),
                        self.args.max_grad_norm,
                    )

                self.optimizer.step()
                self.optimizer.zero_grad()
                if self.lr_scheduler is not None:
                    self.lr_scheduler.step()

                self.state.global_step += 1

                # update stats etc
                self.tr_loss += tr_loss_step

                self._maybe_log_save_evaluate()

    def _maybe_log_save_evaluate(self):
        # check if eval is required
        if self.args.eval_steps is not None:
            if self.state.global_step % self.args.eval_steps == 0 and self.state.global_step != 0:
                self.evaluate(self.eval_dataset)

        # check if logging is required
        if self.args.logging_steps is not None:
            if self.state.global_step % self.args.logging_steps == 0 and self.state.global_step != 0:
                logs: Dict[str, float] = {}

                tr_loss_scalar = self._nested_gather(self.tr_loss).mean().item()

                # reset tr_loss to zero
                self.tr_loss -= self.tr_loss

                logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
                logs["learning_rate"] = self._get_learning_rate()

                self._globalstep_last_logged = self.state.global_step

                self.log(logs)

    @wraps(Trainer.push_to_hub)
    def push_to_hub(
        self,
        commit_message: Optional[str] = "End of training",
        blocking: bool = True,
        **kwargs,
    ) -> str:
        """
        Overwrite the `push_to_hub` method in order to force-add the tag "iterative-sft" when pushing the
        model on the Hub. Please refer to `~transformers.Trainer.push_to_hub` for more details.
        Unlike the parent class, we don't use the `token` argument to mitigate security risks.
        """
        kwargs = trl_sanitze_kwargs_for_tagging(model=self.model, tag_names=self._tag_names, kwargs=kwargs)

        return super().push_to_hub(commit_message=commit_message, blocking=blocking, **kwargs)
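
# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the original file, kept commented out so
# it cannot execute on import). One plausible way to drive `step` in a
# generate -> filter -> fine-tune loop; `model`, `tokenizer`, `trainer`,
# `num_iterations`, `sample_prompts`, and `select_best` are hypothetical
# placeholders — only the `step(input_ids=..., attention_mask=...)` call
# mirrors the API defined above:
#
#     for _ in range(num_iterations):
#         prompts = sample_prompts()                       # hypothetical helper
#         encoded = tokenizer(prompts, return_tensors="pt", padding=True)
#         generations = model.generate(**encoded, max_new_tokens=64)
#         best = select_best(generations)                  # hypothetical filter
#         trainer.step(
#             input_ids=list(best),
#             attention_mask=[(ids != tokenizer.pad_token_id).long() for ids in best],
#         )
# ---------------------------------------------------------------------------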
trl/trl/trainer/iterative_sft_trainer.py/0
{ "file_path": "trl/trl/trainer/iterative_sft_trainer.py", "repo_id": "trl", "token_count": 7768 }
459
import gc import math import os import time from collections import defaultdict from functools import wraps from typing import Dict, List, Optional, Tuple, Union import numpy as np import pandas as pd import torch import torch.nn as nn import torch.nn.functional as F from accelerate import Accelerator from accelerate.utils import broadcast, gather_object from datasets import Dataset from torch.utils.data import DataLoader from transformers import ( DataCollatorWithPadding, GenerationConfig, PreTrainedTokenizer, Trainer, TrainerCallback, TrainerControl, ) from transformers.integrations import get_reporting_integration_callbacks from transformers.trainer import DEFAULT_CALLBACKS, DEFAULT_PROGRESS_CALLBACK from transformers.trainer_callback import CallbackHandler, PrinterCallback from ..models.utils import unwrap_model_for_generation from ..trainer.utils import ( OnlineTrainerState, batch_generation, disable_dropout_in_model, exact_div, first_true_indices, forward, get_reward, prepare_deepspeed, print_rich_table, truncate_response, ) from .rloo_config import RLOOConfig from .utils import trl_sanitze_kwargs_for_tagging INVALID_LOGPROB = 1.0 class RLOOTrainer(Trainer): _tag_names = ["trl", "rloo"] def __init__( self, config: RLOOConfig, tokenizer: PreTrainedTokenizer, policy: nn.Module, ref_policy: nn.Module, reward_model: nn.Module, train_dataset: Dataset, data_collator: Optional[DataCollatorWithPadding] = None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None, # less commonly used optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), callbacks: Optional[List[TrainerCallback]] = None, ) -> None: self.args = config args = config self.tokenizer = tokenizer self.policy = policy self.policy.generation_config.eos_token_id = ( None # disable `pad_token_id` and `eos_token_id` because we just want to ) self.policy.generation_config.pad_token_id = None # generate tokens without truncation / padding self.ref_policy = ref_policy self.reward_model = reward_model self.train_dataset = train_dataset self.train_dataset_len = len(train_dataset) self.data_collator = data_collator self.eval_dataset = eval_dataset self.optimizer, self.lr_scheduler = optimizers ######### # calculate various batch sizes ######### if args.total_episodes is None: # allow the users to define episodes in terms of epochs. 
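            # (editorial example, illustrative numbers:) num_train_epochs=2 over a
            # 1,000-prompt train set gives total_episodes = 2 * 1,000 = 2,000.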
            args.total_episodes = int(args.num_train_epochs * self.train_dataset_len)
        accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps)
        self.accelerator = accelerator
        args.world_size = accelerator.num_processes
        args.local_batch_size = (
            args.per_device_train_batch_size * args.gradient_accumulation_steps * args.num_mini_batches
        )
        args.micro_batch_size = int(args.per_device_train_batch_size * args.world_size)
        args.batch_size = int(args.local_batch_size * args.world_size)
        args.mini_batch_size = exact_div(
            args.batch_size, args.num_mini_batches, "`batch_size` must be a multiple of `num_mini_batches`"
        )
        args.local_mini_batch_size = exact_div(
            args.local_batch_size, args.num_mini_batches, "`local_batch_size` must be a multiple of `num_mini_batches`"
        )
        args.num_total_batches = math.ceil(
            args.total_episodes / args.batch_size
        )  # we may train for more than `total_episodes`
        time_tensor = torch.tensor(int(time.time()), device=accelerator.device)
        time_int = broadcast(time_tensor, 0).item()  # avoid different timestamps across processes
        args.run_name = f"{args.exp_name}__{args.seed}__{time_int}"
        self.local_seed = args.seed + accelerator.process_index * 100003  # Prime
        if args.num_sample_generations > 0:
            self.sample_generations_freq = max(1, args.num_total_batches // args.num_sample_generations)
        self.local_dataloader_batch_size = exact_div(
            args.local_batch_size, args.rloo_k, "`local_batch_size` must be a multiple of rloo_k"
        )  # RLOO logic: needed because RLOO repeats the same prompt args.rloo_k times

        #########
        # setup model, optimizer, and others
        #########
        for module in [policy, ref_policy, reward_model]:
            disable_dropout_in_model(module)
        if args.stop_token and args.stop_token == "eos":
            args.stop_token_id = tokenizer.eos_token_id
        self.model = policy
        self.create_optimizer_and_scheduler(
            num_training_steps=args.num_total_batches
        )  # note that we are calling `self.lr_scheduler.step()` manually only at the batch level

        #########
        ### trainer specifics
        #########
        self.state = OnlineTrainerState(
            is_local_process_zero=self.is_local_process_zero(),
            is_world_process_zero=self.is_world_process_zero(),
        )
        default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
        self.callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
        self.callback_handler = CallbackHandler(
            self.callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
        )
        self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
        self.control = TrainerControl()
        self.current_flos = 0
        self.hp_search_backend = None
        self.is_deepspeed_enabled = getattr(self.accelerator.state, "deepspeed_plugin", None) is not None
        self.is_fsdp_enabled = getattr(self.accelerator.state, "fsdp_plugin", None) is not None
        # Create distant repo and output directory if needed
        self.hub_model_id = None
        if self.args.push_to_hub:
            self.init_hf_repo()
        if self.args.should_save:
            os.makedirs(self.args.output_dir, exist_ok=True)
        self.backup_model = None

        #########
        ### setup dataloader
        #########
        self.dataloader = DataLoader(
            self.train_dataset,
            batch_size=self.local_dataloader_batch_size,
            shuffle=True,
            collate_fn=DataCollatorWithPadding(tokenizer),
            drop_last=True,  # needed; otherwise the last batch will be of ragged shape
        )
        # sync random states for DataLoader(shuffle=True) before `accelerator.prepare`
        # see https://gist.github.com/vwxyzjn/2581bff1e48e185e0b85b6dfe1def79c
        torch.manual_seed(args.seed)
        self.model, self.optimizer, self.dataloader = accelerator.prepare(self.model, self.optimizer, self.dataloader)
        torch.manual_seed(self.local_seed)  # reset the local seed again
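        # Worked example of the batch-size bookkeeping above (editorial,
        # illustrative numbers): with per_device_train_batch_size=4,
        # gradient_accumulation_steps=4, num_mini_batches=2, world_size=2 and
        # rloo_k=2:
        #   local_batch_size            = 4 * 4 * 2 = 32
        #   batch_size                  = 32 * 2    = 64
        #   local_mini_batch_size       = 32 / 2    = 16
        #   local_dataloader_batch_size = 32 / 2    = 16  (each prompt is repeated rloo_k times)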
        self.eval_dataloader = DataLoader(
            self.eval_dataset,
            batch_size=args.per_device_eval_batch_size,
            collate_fn=DataCollatorWithPadding(self.tokenizer),
            drop_last=True,
        )  # no need to shuffle eval dataset
        self.eval_dataloader = accelerator.prepare(self.eval_dataloader)

        if self.is_deepspeed_enabled:
            self.reward_model = prepare_deepspeed(
                self.reward_model, args.per_device_train_batch_size, args.fp16, args.bf16
            )
            self.ref_policy = prepare_deepspeed(
                self.ref_policy, args.per_device_train_batch_size, args.fp16, args.bf16
            )
            self.deepspeed = self.model
        else:
            self.ref_policy = self.ref_policy.to(self.accelerator.device)
            self.reward_model = self.reward_model.to(self.accelerator.device)

    def get_train_dataloader(self) -> DataLoader:
        return self.dataloader

    def get_eval_dataloader(self) -> DataLoader:
        return self.eval_dataloader

    def train(self):
        args = self.args
        accelerator = self.accelerator
        optimizer = self.optimizer
        model = self.model
        self.model_wrapped = self.model
        ref_policy = self.ref_policy
        reward_model = self.reward_model
        tokenizer = self.tokenizer
        dataloader = self.dataloader
        device = accelerator.device

        def repeat_generator():
            while True:
                yield from dataloader

        iter_dataloader = iter(repeat_generator())
        generation_config = GenerationConfig(
            max_new_tokens=args.response_length,
            min_new_tokens=args.response_length,
            temperature=(args.temperature + 1e-7),
            top_k=0.0,
            top_p=1.0,
            do_sample=True,
        )

        accelerator.print("===training policy===")
        start_time = time.time()
        stats_shape = (args.num_ppo_epochs, args.num_mini_batches, args.gradient_accumulation_steps)
        approxkl_stats = torch.zeros(stats_shape, device=device)
        pg_clipfrac_stats = torch.zeros(stats_shape, device=device)
        pg_loss_stats = torch.zeros(stats_shape, device=device)
        vf_loss_stats = torch.zeros(stats_shape, device=device)
        vf_clipfrac_stats = torch.zeros(stats_shape, device=device)
        entropy_stats = torch.zeros(stats_shape, device=device)
        ratio_stats = torch.zeros(stats_shape, device=device)
        model.train()

        # trainer state initialization
        self.state.global_step = 0
        self.state.episode = 0
        self.state.max_steps = args.num_total_batches * args.num_mini_batches
        self.state.num_train_epochs = args.total_episodes / self.train_dataset_len
        # Compute absolute values for logging, eval, and save if given as ratio
        if args.logging_steps is not None:
            if args.logging_steps < 1:
                self.state.logging_steps = math.ceil(self.state.max_steps * args.logging_steps)
            else:
                self.state.logging_steps = args.logging_steps
        if args.eval_steps is not None:
            if args.eval_steps < 1:
                self.state.eval_steps = math.ceil(self.state.max_steps * args.eval_steps)
            else:
                self.state.eval_steps = args.eval_steps
        if args.save_steps is not None:
            if args.save_steps < 1:
                self.state.save_steps = math.ceil(self.state.max_steps * args.save_steps)
            else:
                self.state.save_steps = args.save_steps
        self.control = self.callback_handler.on_train_begin(args, self.state, self.control)

        for update in range(1, args.num_total_batches + 1):
            self.state.episode += 1 * args.batch_size
            data = next(iter_dataloader)
            with torch.no_grad():
                queries = data["input_ids"].to(device)
                queries = queries.repeat(args.rloo_k, 1)
                context_length = queries.shape[1]
                query_responses = []
                responses = []
                postprocessed_responses = []
                logprobs = []
                ref_logprobs = []
                scores = []
                sequence_lengths = []
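                # Editorial summary of the rollout below: generate completions for
                # the rloo_k-times-repeated queries in micro-batches, then score
                # each completion under the current policy, the frozen reference
                # policy, and the reward model.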
                with unwrap_model_for_generation(model, self.accelerator) as unwrapped_model:
                    query_responses, logitss = batch_generation(
                        unwrapped_model,
                        queries,
                        args.local_rollout_forward_batch_size,
                        tokenizer.pad_token_id,
                        generation_config,
                    )

                for i in range(0, queries.shape[0], args.local_rollout_forward_batch_size):
                    query = queries[i : i + args.local_rollout_forward_batch_size]
                    query_response = query_responses[i : i + args.local_rollout_forward_batch_size]
                    response = query_response[:, context_length:]

                    logits = logitss[i : i + args.local_rollout_forward_batch_size]
                    all_logprob = F.log_softmax(logits, dim=-1)
                    logprob = torch.gather(all_logprob, 2, response.unsqueeze(-1)).squeeze(-1)
                    del logits, all_logprob
                    torch.cuda.empty_cache()

                    ref_output = forward(ref_policy, query_response, tokenizer.pad_token_id)
                    ref_logits = ref_output.logits[:, context_length - 1 : -1]
                    ref_logits /= args.temperature + 1e-7
                    ref_all_logprob = F.log_softmax(ref_logits, dim=-1)
                    ref_logprob = torch.gather(ref_all_logprob, 2, response.unsqueeze(-1)).squeeze(-1)
                    del ref_output, ref_logits, ref_all_logprob
                    torch.cuda.empty_cache()

                    # Response Processing 1. truncate response after the first occurrence of `stop_token_id`
                    postprocessed_response = response
                    if args.stop_token_id is not None:  # handle the edge case when stop_token_id exists but is 0
                        postprocessed_response = truncate_response(
                            args.stop_token_id, tokenizer.pad_token_id, response
                        )

                    # Response Processing 2. run reward model on the truncated responses
                    postprocessed_query_response = torch.cat((query, postprocessed_response), 1)
                    sequence_length = first_true_indices(postprocessed_response == tokenizer.pad_token_id) - 1
                    _, score, _ = get_reward(
                        reward_model, postprocessed_query_response, tokenizer.pad_token_id, context_length
                    )

                    responses.append(response)
                    postprocessed_responses.append(postprocessed_response)
                    logprobs.append(logprob)
                    ref_logprobs.append(ref_logprob)
                    sequence_lengths.append(sequence_length)
                    scores.append(score)
                responses = torch.cat(responses, 0)
                postprocessed_responses = torch.cat(postprocessed_responses, 0)
                logprobs = torch.cat(logprobs, 0)
                ref_logprobs = torch.cat(ref_logprobs, 0)
                sequence_lengths = torch.cat(sequence_lengths, 0)
                scores = torch.cat(scores, 0)
                del (logprob, ref_logprob, score)
                torch.cuda.empty_cache()
                gc.collect()

                # Response Processing 3. filter response. Ensure that the sample contains stop_token_id
                # responses not passing that filter will receive a low (fixed) score
                # only query humans on responses that pass that filter
                contain_eos_token = torch.any(postprocessed_responses == tokenizer.eos_token_id, dim=-1)
                if args.non_eos_penalty:
                    scores = torch.where(contain_eos_token, scores, args.penalty_reward_value)
                # accelerator.print(f"{scores=}, {(contain_eos_token.sum() / len(contain_eos_token))=}")

                # be very careful with `padding_mask`; see https://excalidraw.com/#json=LWnzG4w2k5DjF_EOL_xPt,e2w3a-hFJ_gX5vOfeyXGTw
                response_idxs = torch.arange(responses.shape[1], device=responses.device).repeat(responses.shape[0], 1)
                padding_mask = response_idxs > sequence_lengths.unsqueeze(1)
                logprobs = torch.masked_fill(logprobs, padding_mask, INVALID_LOGPROB)
                ref_logprobs = torch.masked_fill(ref_logprobs, padding_mask, INVALID_LOGPROB)
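                # Editorial sketch of the reward/advantage step below (toy numbers,
                # not from the source): with rloo_k=2 and rewards r1, r2 for the two
                # samples of one prompt, the leave-one-out baseline is b1=r2 and
                # b2=r1, so the advantages are a1 = r1 - r2 and a2 = r2 - r1.
                # In general, b_i = (sum_j r_j - r_i) / (rloo_k - 1).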
                # 4. compute rewards
                kl = logprobs - ref_logprobs
                non_score_reward = (-args.kl_coef * kl).sum(1)
                rlhf_reward = scores + non_score_reward

                # vectorized RLOO advantages implementation
                rlhf_reward = rlhf_reward.reshape(args.rloo_k, -1)
                baseline = (rlhf_reward.sum(0) - rlhf_reward) / (args.rloo_k - 1)
                advantages = rlhf_reward - baseline
                advantages = advantages.flatten()
                torch.cuda.empty_cache()

            # Do multiple epochs of PPO training, with a fresh random shuffle in each epoch
            for ppo_epoch_idx in range(args.num_ppo_epochs):
                b_inds = np.random.permutation(args.local_batch_size)
                minibatch_idx = 0
                for mini_batch_start in range(0, args.local_batch_size, args.local_mini_batch_size):
                    mini_batch_end = mini_batch_start + args.local_mini_batch_size
                    mini_batch_inds = b_inds[mini_batch_start:mini_batch_end]
                    gradient_accumulation_idx = 0
                    for micro_batch_start in range(0, args.local_mini_batch_size, args.per_device_train_batch_size):
                        with accelerator.accumulate(model):
                            micro_batch_end = micro_batch_start + args.per_device_train_batch_size
                            micro_batch_inds = mini_batch_inds[micro_batch_start:micro_batch_end]
                            mb_advantage = advantages[micro_batch_inds]
                            mb_responses = responses[micro_batch_inds]
                            mb_query_responses = query_responses[micro_batch_inds]
                            mb_logprobs = logprobs[micro_batch_inds]
                            output = forward(model, mb_query_responses, tokenizer.pad_token_id)
                            logits = output.logits[:, context_length - 1 : -1]
                            logits /= args.temperature + 1e-7
                            new_all_logprobs = F.log_softmax(logits, dim=-1)
                            new_logprobs = torch.gather(new_all_logprobs, 2, mb_responses.unsqueeze(-1)).squeeze(-1)
                            new_logprobs = torch.masked_fill(
                                new_logprobs, padding_mask[micro_batch_inds], INVALID_LOGPROB
                            )
                            new_ratio = (new_logprobs - mb_logprobs).exp()
                            new_logprobs = new_logprobs.sum(1)
                            mb_logprobs = mb_logprobs.sum(1)
                            logprobs_diff = new_logprobs - mb_logprobs
                            ratio = torch.exp(logprobs_diff)
                            pg_losses = -mb_advantage * ratio
                            pg_losses2 = -mb_advantage * torch.clamp(ratio, 1.0 - args.cliprange, 1.0 + args.cliprange)
                            pg_loss_max = torch.max(pg_losses, pg_losses2)
                            pg_loss = pg_loss_max.mean()
                            loss = pg_loss
                            accelerator.backward(loss)
                            optimizer.step()
                            optimizer.zero_grad()
                            with torch.no_grad():
                                pg_clipfrac = (pg_losses2 > pg_losses).float().mean()
                                prob_dist = torch.nn.functional.softmax(logits, dim=-1)
                                entropy = torch.logsumexp(logits, dim=-1) - torch.sum(prob_dist * logits, dim=-1)
                                approxkl = 0.5 * (logprobs_diff**2).mean()
                                approxkl_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = approxkl
                                pg_clipfrac_stats[
                                    ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx
                                ] = pg_clipfrac
                                pg_loss_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = pg_loss
                                entropy_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = entropy.mean()
                                ratio_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = new_ratio.mean()
                        gradient_accumulation_idx += 1
                    minibatch_idx += 1
                    self.state.global_step += 1
                    # del everything and empty cache
                    # fmt: off
                    del (
                        output, logits, new_all_logprobs, new_logprobs, logprobs_diff, ratio, pg_losses,
                        pg_losses2, pg_loss, loss, pg_clipfrac, prob_dist, entropy, approxkl,
                        mb_advantage, mb_responses, mb_query_responses, mb_logprobs,
                    )
                    # fmt: on
                    torch.cuda.empty_cache()
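            # Editorial note: the statistics recorded during the PPO epochs above
            # are gathered across all processes below, so the logged metrics
            # reflect the global batch rather than a single rank.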
            with torch.no_grad():
                mean_kl = kl.sum(1).mean()
                mean_entropy = (-logprobs).sum(1).mean()
                mean_non_score_reward = non_score_reward.mean()
                eps = int(self.state.episode / (time.time() - start_time))
                metrics = {}
                metrics["eps"] = eps
                metrics["objective/kl"] = self.accelerator.gather(mean_kl).mean().item()
                metrics["objective/entropy"] = self.accelerator.gather(mean_entropy).mean().item()
                metrics["objective/non_score_reward"] = self.accelerator.gather(mean_non_score_reward).mean().item()
                metrics["objective/rlhf_reward"] = self.accelerator.gather(rlhf_reward).mean().item()
                metrics["objective/scores"] = self.accelerator.gather(scores.mean()).mean().item()
                metrics["policy/approxkl_avg"] = self.accelerator.gather(approxkl_stats).mean().item()
                metrics["policy/clipfrac_avg"] = self.accelerator.gather(pg_clipfrac_stats).mean().item()
                metrics["loss/policy_avg"] = self.accelerator.gather(pg_loss_stats).mean().item()
                metrics["loss/value_avg"] = self.accelerator.gather(vf_loss_stats).mean().item()
                metrics["val/clipfrac_avg"] = self.accelerator.gather(vf_clipfrac_stats).mean().item()
                metrics["policy/entropy_avg"] = self.accelerator.gather(entropy_stats).mean().item()
                metrics["val/ratio"] = self.accelerator.gather(ratio_stats).mean().item()
                metrics["val/ratio_var"] = self.accelerator.gather(ratio_stats).var().item()
                metrics["val/num_eos_tokens"] = (responses == tokenizer.eos_token_id).sum().item()
                metrics["lr"] = self.lr_scheduler.get_last_lr()[0]
                metrics["episode"] = self.state.episode
                self.state.epoch = self.state.episode / self.train_dataset_len  # used by self.log
                self.state.global_step += 1
                self.log(metrics)

            del kl, mean_kl, mean_entropy, scores
            self.lr_scheduler.step()
            self.control = self.callback_handler.on_step_end(args, self.state, self.control)
            if self.control.should_save:
                self._save_checkpoint(model, trial=None, metrics=metrics)
                self.control = self.callback_handler.on_save(self.args, self.state, self.control)
            torch.cuda.empty_cache()
            gc.collect()

            if args.num_sample_generations > 0 and (update - 1) % self.sample_generations_freq == 0:
                self.generate_completions(sampling=True)

        # HF trainer specifics
        self.control = self.callback_handler.on_train_end(args, self.state, self.control)
        if self.control.should_save:
            self._save_checkpoint(model, trial=None, metrics=None)
            self.control = self.callback_handler.on_save(self.args, self.state, self.control)

    def generate_completions(self, sampling: bool = False):
        args = self.args
        tokenizer = self.tokenizer
        generation_config = GenerationConfig(
            max_new_tokens=self.args.response_length,
            temperature=(0.01 + 1e-7),
            top_k=0.0,
            top_p=1.0,
            do_sample=True,
        )

        table = defaultdict(list)
        with unwrap_model_for_generation(self.model, self.accelerator) as unwrapped_model:
            for batch in self.eval_dataloader:
                query = batch["input_ids"]
                with torch.no_grad():
                    context_length = query.shape[1]
                    query_response, _ = batch_generation(
                        unwrapped_model,
                        query,
                        query.shape[0],
                        tokenizer.pad_token_id,
                        generation_config,
                    )
                    response = query_response[:, context_length:]
                    postprocessed_response = response
                    if args.stop_token_id is not None:  # handle the edge case when stop_token_id exists but is 0
                        postprocessed_response = truncate_response(
                            args.stop_token_id, tokenizer.pad_token_id, response
                        )
                    table["query"].extend(gather_object(tokenizer.batch_decode(query, skip_special_tokens=True)))
                    table["model response"].extend(gather_object(tokenizer.batch_decode(postprocessed_response)))

                    postprocessed_query_response = torch.cat((query, postprocessed_response), 1)
                    _, score, _ = get_reward(
                        self.reward_model, postprocessed_query_response, tokenizer.pad_token_id, context_length
                    )
                    table["score"].extend(self.accelerator.gather(score).float().cpu().numpy())

                if sampling:
                    break
        df = pd.DataFrame(table)

        if self.accelerator.is_main_process:
            print_rich_table(df.iloc[0 : 0 + 5])
            if "wandb" in args.report_to:
                import wandb

                if wandb.run is not None:
wandb.log({"completions": wandb.Table(dataframe=df)}) @wraps(Trainer.push_to_hub) def push_to_hub( self, commit_message: Optional[str] = "End of training", blocking: bool = True, **kwargs, ) -> str: """ Overwrite the `push_to_hub` method in order to force-add the tag "rloo" when pushing the model on the Hub. Please refer to `~transformers.Trainer.push_to_hub` for more details. Unlike the parent class, we don't use the `token` argument to mitigate security risks. """ kwargs = trl_sanitze_kwargs_for_tagging(model=self.model, tag_names=self._tag_names, kwargs=kwargs) return super().push_to_hub(commit_message=commit_message, blocking=blocking, **kwargs)
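
# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the original file, kept commented out so
# it cannot execute on import). A minimal wiring of the trainer, assuming
# `policy`, `ref_policy`, `reward_model`, `tokenizer`, `train_dataset`, and
# `eval_dataset` were constructed elsewhere; the RLOOConfig field values shown
# are illustrative, not prescriptive:
#
#     config = RLOOConfig(
#         output_dir="rloo_out",
#         per_device_train_batch_size=4,
#         gradient_accumulation_steps=4,
#         rloo_k=2,
#     )
#     trainer = RLOOTrainer(
#         config=config,
#         tokenizer=tokenizer,
#         policy=policy,
#         ref_policy=ref_policy,
#         reward_model=reward_model,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,  # required: __init__ builds an eval dataloader
#     )
#     trainer.train()
# ---------------------------------------------------------------------------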
trl/trl/trainer/rloo_trainer.py/0
{ "file_path": "trl/trl/trainer/rloo_trainer.py", "repo_id": "trl", "token_count": 13242 }
460