Dataset schema:

    column              type     range / values
    repo_id             string   55 distinct values
    file_path           string   length 42-186
    content             string   length 1-333k
    __index_level_0__   int64    0-0
repo_id: mavonic_private_repos/transformers/src/transformers/models
file_path: mavonic_private_repos/transformers/src/transformers/models/m2m_100/__init__.py
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
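A short sketch of what this `_LazyModule` pattern buys at import time (editor's illustration, not part of the file; assumes a local transformers install). Importing the package is cheap because each submodule is only imported when one of the names in `_import_structure` is first accessed:

    # Accessing M2M100Config triggers the import of configuration_m2m_100 on demand;
    # until then, neither torch-heavy modeling code nor the tokenizer module is loaded.
    from transformers.models.m2m_100 import M2M100Config

    config = M2M100Config()
    print(config.d_model)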
repo_id: mavonic_private_repos/transformers/src/transformers/models
file_path: mavonic_private_repos/transformers/src/transformers/models/m2m_100/modeling_m2m_100.py
# coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch M2M100 model."""

import math
from typing import List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import CrossEntropyLoss

from ...activations import ACT2FN
from ...integrations.deepspeed import is_deepspeed_zero3_enabled
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPastAndCrossAttentions,
    Seq2SeqLMOutput,
    Seq2SeqModelOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_end_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_flash_attn_2_available,
    is_flash_attn_greater_or_equal_2_10,
    logging,
    replace_return_docstrings,
)
from .configuration_m2m_100 import M2M100Config


if is_flash_attn_2_available():
    from flash_attn import flash_attn_func, flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "M2M100Config"
_CHECKPOINT_FOR_DOC = "facebook/m2m100_418M"


from ..deprecated._archive_maps import M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402


# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """
    Shift input ids one token to the right.
    """
    shifted_input_ids = input_ids.new_zeros(input_ids.shape)
    shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
    shifted_input_ids[:, 0] = decoder_start_token_id

    if pad_token_id is None:
        raise ValueError("self.model.config.pad_token_id has to be defined.")
    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)

    return shifted_input_ids
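# Worked example (editor's note, not part of the original file): with pad_token_id=1
# and decoder_start_token_id=2, labels [[5, 6, -100]] become decoder inputs
# [[2, 5, 6]] -- the shift drops the final label, prepends the start token, and any
# remaining -100 loss-masking values would be replaced by the pad token:
#   shift_tokens_right(torch.tensor([[5, 6, -100]]), 1, 2)  ->  tensor([[2, 5, 6]])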
""" def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float] = 1.0): super().__init__(num_embeddings, embedding_dim, padding_idx) self.embed_scale = embed_scale def forward(self, input_ids: torch.Tensor): return super().forward(input_ids) * self.embed_scale class M2M100SinusoidalPositionalEmbedding(nn.Module): """This module produces sinusoidal positional embeddings of any length.""" def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None): super().__init__() self.offset = 2 self.embedding_dim = embedding_dim self.padding_idx = padding_idx self.make_weights(num_positions + self.offset, embedding_dim, padding_idx) def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None): emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx) if hasattr(self, "weights"): # in forward put the weights on the correct dtype and device of the param emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device) self.register_buffer("weights", emb_weights, persistent=False) @staticmethod def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None): """ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb.to(torch.get_default_dtype()) @torch.no_grad() def forward( self, input_ids: torch.Tensor = None, inputs_embeds: torch.Tensor = None, past_key_values_length: int = 0 ): if input_ids is not None: bsz, seq_len = input_ids.size() # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to( input_ids.device ) else: bsz, seq_len = inputs_embeds.size()[:-1] position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length) # expand embeddings if needed max_pos = self.padding_idx + 1 + seq_len + past_key_values_length if max_pos > self.weights.size(0): self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx) return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach() def create_position_ids_from_inputs_embeds(self, inputs_embeds, past_key_values_length): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. 
# Copied from transformers.models.bart.modeling_bart.BartScaledWordEmbedding with Bart->M2M100
class M2M100ScaledWordEmbedding(nn.Embedding):
    """
    This module overrides nn.Embedding's forward by multiplying with embeddings scale.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float] = 1.0):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        self.embed_scale = embed_scale

    def forward(self, input_ids: torch.Tensor):
        return super().forward(input_ids) * self.embed_scale


class M2M100SinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length."""

    def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
        super().__init__()
        self.offset = 2
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)

    def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
        emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
        if hasattr(self, "weights"):
            # in forward put the weights on the correct dtype and device of the param
            emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)

        self.register_buffer("weights", emb_weights, persistent=False)

    @staticmethod
    def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
        """
        Build sinusoidal embeddings.

        This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
        "Attention Is All You Need".
        """
        half_dim = embedding_dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
        emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            # zero pad
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            emb[padding_idx, :] = 0

        return emb.to(torch.get_default_dtype())

    @torch.no_grad()
    def forward(
        self, input_ids: torch.Tensor = None, inputs_embeds: torch.Tensor = None, past_key_values_length: int = 0
    ):
        if input_ids is not None:
            bsz, seq_len = input_ids.size()
            # Create the position ids from the input token ids. Any padded tokens remain padded.
            position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(
                input_ids.device
            )
        else:
            bsz, seq_len = inputs_embeds.size()[:-1]
            position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length)

        # expand embeddings if needed
        max_pos = self.padding_idx + 1 + seq_len + past_key_values_length
        if max_pos > self.weights.size(0):
            self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)

        return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach()

    def create_position_ids_from_inputs_embeds(self, inputs_embeds, past_key_values_length):
        """
        We are provided embeddings directly. We cannot infer which are padded so just generate sequential position
        ids.

        Args:
            inputs_embeds: torch.Tensor

        Returns: torch.Tensor
        """
        input_shape = inputs_embeds.size()[:-1]
        sequence_length = input_shape[1]

        position_ids = torch.arange(
            self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
        )
        return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length
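# Editor's sketch of how the class above behaves (not in the original file): the table
# grows on demand and padding tokens map to an all-zero row.
#   emb = M2M100SinusoidalPositionalEmbedding(num_positions=8, embedding_dim=16, padding_idx=1)
#   out = emb(torch.tensor([[1, 5, 6, 1]]))   # shape (1, 4, 16); rows for the pad token are zeros
# get_embedding zeroes row `padding_idx`, and forward routes padding tokens to that row
# via create_position_ids_from_input_ids.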
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->M2M100
class M2M100Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        is_causal: bool = False,
        config: Optional[M2M100Config] = None,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        self.config = config

        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.is_causal = is_causal

        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        bsz, tgt_len, _ = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        # `past_key_value[0].shape[2] == key_value_states.shape[1]`
        # is checking that the `sequence_length` of the `past_key_value` is the same as
        # the provided `key_value_states` to support prefix tuning
        if (
            is_cross_attention
            and past_key_value is not None
            and past_key_value[0].shape[2] == key_value_states.shape[1]
        ):
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.reshape(*proj_shape)
        value_states = value_states.reshape(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)

        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped, past_key_value
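# Shape walk-through for the eager path above (editor's note): with bsz=2, tgt_len=5,
# embed_dim=16 and num_heads=4, _shape turns each (2, 5, 16) projection into
# (2, 4, 5, 4); flattening to proj_shape (8, 5, 4) lets a single torch.bmm compute
# all heads' (5, src_len) score matrices at once before the softmax.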
# Copied from transformers.models.llama.modeling_llama._get_unpad_data
def _get_unpad_data(attention_mask):
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return (
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
    )
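# Worked example (editor's note): for attention_mask [[1, 1, 0], [1, 1, 1]],
# seqlens_in_batch is [2, 3], indices (positions of non-padding tokens in the
# flattened mask) is [0, 1, 3, 4, 5], cu_seqlens is [0, 2, 5], and
# max_seqlen_in_batch is 3 -- exactly the metadata flash_attn_varlen_func expects.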
class M2M100FlashAttention2(M2M100Attention):
    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        is_causal: bool = False,
        config: Optional[M2M100Config] = None,
    ):
        super().__init__(embed_dim, num_heads, dropout, is_decoder, bias, is_causal, config)
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def _reshape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        bsz, q_len, _ = hidden_states.size()

        # get query proj
        query_states = self._reshape(self.q_proj(hidden_states), -1, bsz)
        # get key, value proj
        # `past_key_value[0].shape[2] == key_value_states.shape[1]`
        # is checking that the `sequence_length` of the `past_key_value` is the same as
        # the provided `key_value_states` to support prefix tuning
        if (
            is_cross_attention
            and past_key_value is not None
            and past_key_value[0].shape[2] == key_value_states.shape[1]
        ):
            # reuse k,v, cross_attentions
            key_states = past_key_value[0].transpose(1, 2)
            value_states = past_key_value[1].transpose(1, 2)
        elif is_cross_attention:
            # cross_attentions
            key_states = self._reshape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._reshape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._reshape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._reshape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0].transpose(1, 2), key_states], dim=1)
            value_states = torch.cat([past_key_value[1].transpose(1, 2), value_states], dim=1)
        else:
            # self_attention
            key_states = self._reshape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._reshape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states.transpose(1, 2), value_states.transpose(1, 2))

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]

        attn_output = self._flash_attention_forward(
            query_states, key_states, value_states, attention_mask, q_len, dropout=self.dropout, softmax_scale=None
        )

        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, q_len, self.embed_dim)
        attn_output = self.out_proj(attn_output)

        return attn_output, None, past_key_value
    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
    def _flash_attention_forward(
        self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
    ):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
        first unpad the input, then computes the attention scores and pad the final attention scores.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            dropout (`float`):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
        """
        if not self._flash_attn_uses_top_left_mask:
            causal = self.is_causal
        else:
            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details,
            # please see the comment in LlamaFlashAttention2 __init__.
            causal = self.is_causal and query_length != 1

        # Contains at least one padding token in the sequence
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
                query_states, key_states, value_states, attention_mask, query_length
            )

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            attn_output_unpad = flash_attn_varlen_func(
                query_states,
                key_states,
                value_states,
                cu_seqlens_q=cu_seqlens_q,
                cu_seqlens_k=cu_seqlens_k,
                max_seqlen_q=max_seqlen_in_batch_q,
                max_seqlen_k=max_seqlen_in_batch_k,
                dropout_p=dropout,
                softmax_scale=softmax_scale,
                causal=causal,
            )

            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            attn_output = flash_attn_func(
                query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
            )

        return attn_output
    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input
    def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
        batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape

        key_layer = index_first_axis(
            key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )
        value_layer = index_first_axis(
            value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )
        if query_length == kv_seq_len:
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q,
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )
""" residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16 and ( torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() ): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs M2M100_ATTENTION_CLASSES = { "eager": M2M100Attention, "flash_attention_2": M2M100FlashAttention2, } # Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->M2M100, MBART->M2M100 class M2M100DecoderLayer(nn.Module): def __init__(self, config: M2M100Config): super().__init__() self.embed_dim = config.d_model self.self_attn = M2M100_ATTENTION_CLASSES[config._attn_implementation]( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, is_causal=True, config=config, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = M2M100_ATTENTION_CLASSES[config._attn_implementation]( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, config=config, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, cross_attn_layer_head_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. 
M2M100_ATTENTION_CLASSES = {
    "eager": M2M100Attention,
    "flash_attention_2": M2M100FlashAttention2,
}


# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->M2M100, MBART->M2M100
class M2M100DecoderLayer(nn.Module):
    def __init__(self, config: M2M100Config):
        super().__init__()
        self.embed_dim = config.d_model

        self.self_attn = M2M100_ATTENTION_CLASSES[config._attn_implementation](
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
            is_causal=True,
            config=config,
        )
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout

        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.encoder_attn = M2M100_ATTENTION_CLASSES[config._attn_implementation](
            self.embed_dim,
            config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
            config=config,
        )
        self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = True,
    ) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
                size `(decoder_attention_heads,)`.
            past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)

        # Self Attention
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        # add present self-attn cache to positions 1,2 of present_key_value tuple
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            past_key_value=self_attn_past_key_value,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        # Cross-Attention Block
        cross_attn_present_key_value = None
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)

            # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                layer_head_mask=cross_attn_layer_head_mask,
                past_key_value=cross_attn_past_key_value,
                output_attentions=output_attentions,
            )
            hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
            hidden_states = residual + hidden_states

            # add cross-attn to positions 3,4 of present_key_value tuple
            present_key_value = present_key_value + cross_attn_present_key_value

        # Fully Connected
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)

        if use_cache:
            outputs += (present_key_value,)

        return outputs
class M2M100PreTrainedModel(PreTrainedModel):
    config_class = M2M100Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["M2M100Attention"]
    _supports_flash_attn_2 = True

    def _init_weights(self, module):
        std = self.config.init_std
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


M2M_100_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
    heads etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`M2M100Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
M2M_100_GENERATION_EXAMPLE = r"""
    Translation example:

    ```python
    >>> from transformers import AutoTokenizer, M2M100ForConditionalGeneration

    >>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    >>> tokenizer = AutoTokenizer.from_pretrained("facebook/m2m100_418M")

    >>> text_to_translate = "Life is like a box of chocolates"
    >>> model_inputs = tokenizer(text_to_translate, return_tensors="pt")

    >>> # translate to French
    >>> gen_tokens = model.generate(**model_inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
    >>> print(tokenizer.batch_decode(gen_tokens, skip_special_tokens=True))
    ```
"""

M2M_100_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            M2M100 uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
            `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
            cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
            representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
            input (see `past_key_values`). This is useful if you want more control over how to convert
            `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.

            If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
            of `inputs_embeds`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
class M2M100Encoder(M2M100PreTrainedModel):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`M2M100EncoderLayer`].

    Args:
        config: M2M100Config
        embed_tokens (nn.Embedding): output embedding
    """

    def __init__(self, config: M2M100Config, embed_tokens: Optional[nn.Embedding] = None):
        super().__init__(config)

        self.dropout = config.dropout
        self.layerdrop = config.encoder_layerdrop

        embed_dim = config.d_model
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_position_embeddings
        embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0

        self.embed_tokens = M2M100ScaledWordEmbedding(
            config.vocab_size, embed_dim, self.padding_idx, embed_scale=embed_scale
        )

        if embed_tokens is not None:
            self.embed_tokens.weight = embed_tokens.weight

        self.embed_positions = M2M100SinusoidalPositionalEmbedding(
            config.max_position_embeddings,
            embed_dim,
            self.padding_idx,
        )
        self.layers = nn.ModuleList([M2M100EncoderLayer(config) for _ in range(config.encoder_layers)])
        self.layer_norm = nn.LayerNorm(config.d_model)
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) embed_pos = self.embed_positions(input_ids, inputs_embeds) embed_pos = embed_pos.to(inputs_embeds.device) hidden_states = inputs_embeds + embed_pos hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # expand attention_mask if attention_mask is not None: if self._use_flash_attention_2: attention_mask = attention_mask if 0 in attention_mask else None else: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled() for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = True if self.training and (dropout_probability < self.layerdrop) else False if not skip_the_layer or deepspeed_zero3_is_enabled: # under deepspeed zero3 all gpus must run in sync if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, attention_mask, (head_mask[idx] if head_mask is not None else None), output_attentions, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class M2M100Decoder(M2M100PreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. 
class M2M100Decoder(M2M100PreTrainedModel):
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`M2M100DecoderLayer`].

    Args:
        config: M2M100Config
        embed_tokens (nn.Embedding): output embedding
    """

    def __init__(self, config: M2M100Config, embed_tokens: Optional[nn.Embedding] = None):
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.max_target_positions = config.max_position_embeddings
        embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0

        self.embed_tokens = M2M100ScaledWordEmbedding(
            config.vocab_size, config.d_model, self.padding_idx, embed_scale=embed_scale
        )

        if embed_tokens is not None:
            self.embed_tokens.weight = embed_tokens.weight

        self.embed_positions = M2M100SinusoidalPositionalEmbedding(
            config.max_position_embeddings,
            config.d_model,
            self.padding_idx,
        )
        self.layers = nn.ModuleList([M2M100DecoderLayer(config) for _ in range(config.decoder_layers)])
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
        self.layer_norm = nn.LayerNorm(config.d_model)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
                cross-attention on hidden heads. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential
                decoding.

                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead
                of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated
                vectors than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if self._use_flash_attention_2: # 2d mask is passed through the layers combined_attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None else: # 4d mask is passed through the layers combined_attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: if self._use_flash_attention_2: encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None else: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) # embed positions positions = self.embed_positions(input_ids, inputs_embeds, past_key_values_length) positions = positions.to(inputs_embeds.device) hidden_states = inputs_embeds + positions hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting" " `use_cache=False`..." ) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if output_attentions else None next_decoder_cache = () if use_cache else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." 
                    )
        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = torch.rand([])

            skip_the_layer = True if self.training and (dropout_probability < self.layerdrop) else False
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                # under deepspeed zero3 all gpus must run in sync

                past_key_value = past_key_values[idx] if past_key_values is not None else None

                if self.gradient_checkpointing and self.training:
                    layer_outputs = self._gradient_checkpointing_func(
                        decoder_layer.__call__,
                        hidden_states,
                        combined_attention_mask,
                        encoder_hidden_states,
                        encoder_attention_mask,
                        head_mask[idx] if head_mask is not None else None,
                        cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
                        None,
                        output_attentions,
                        use_cache,
                    )
                else:
                    layer_outputs = decoder_layer(
                        hidden_states,
                        attention_mask=combined_attention_mask,
                        encoder_hidden_states=encoder_hidden_states,
                        encoder_attention_mask=encoder_attention_mask,
                        layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                        cross_attn_layer_head_mask=(
                            cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
                        ),
                        past_key_value=past_key_value,
                        output_attentions=output_attentions,
                        use_cache=use_cache,
                    )

                hidden_states = layer_outputs[0]

            if skip_the_layer:
                continue

            if use_cache:
                next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)
                all_cross_attentions += (layer_outputs[2],)

        hidden_states = self.layer_norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )
) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(M2M_100_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], Seq2SeqModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, 
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )


@add_start_docstrings(
    "The M2M100 Model with a language modeling head. Can be used for translation.", M2M_100_START_DOCSTRING
)
class M2M100ForConditionalGeneration(M2M100PreTrainedModel):
    base_model_prefix = "model"
    _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"]

    def __init__(self, config: M2M100Config):
        super().__init__(config)
        self.model = M2M100Model(config)
        self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_encoder(self):
        return self.model.get_encoder()

    def get_decoder(self):
        return self.model.get_decoder()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    @add_start_docstrings_to_model_forward(M2M_100_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    @add_end_docstrings(M2M_100_GENERATION_EXAMPLE)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], Seq2SeqLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if decoder_input_ids is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = self.lm_head(outputs[0]) masked_lm_loss = None if labels is not None: # move labels to the correct device to enable PP labels = labels.to(lm_logits.device) loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past is used if past_key_values is not None: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if decoder_input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = decoder_input_ids.shape[1] - 1 decoder_input_ids = decoder_input_ids[:, remove_prefix_length:] return { "input_ids": None, # encoder_outputs is defined. input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, # change this to avoid caching (presumably for debugging) } @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past
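# A minimal usage sketch for the class above, assuming the public "facebook/m2m100_418M"
# checkpoint is available. It follows the documented translation workflow: encode with the
# source language set, then `generate` with `forced_bos_token_id` so the first generated
# token is the target language code. Not part of the original module.
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

encoded = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
# Force the first decoded token to be the French language-code token.
generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))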
0
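# A small illustrative sketch of label preparation for training, as used in
# M2M100ForConditionalGeneration.forward above: when `labels` are given and
# `decoder_input_ids` is None, decoder inputs are built with `shift_tokens_right`.
# The token ids are illustrative; pad_token_id=1 and decoder_start_token_id=2 match
# the usual M2M100 configuration but are assumptions here.
import torch

from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

labels = torch.tensor([[42, 7, 99, -100]])  # -100 marks positions ignored by the loss
decoder_input_ids = shift_tokens_right(labels, pad_token_id=1, decoder_start_token_id=2)
print(decoder_input_ids)  # tensor([[ 2, 42,  7, 99]]) -- start token prepended
# Any -100 still present after the shift is replaced by pad_token_id.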
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/m2m_100/tokenization_m2m_100.py
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for M2M100.""" import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "spm_file": "sentencepiece.bpe.model", "tokenizer_config_file": "tokenizer_config.json", } # fmt: off FAIRSEQ_LANGUAGE_CODES = { "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"], "wmt21": ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de'] } # fmt: on class M2M100Tokenizer(PreTrainedTokenizer): """ Construct an M2M100 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. spm_file (`str`): Path to [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that contains the vocabulary. src_lang (`str`, *optional*): A string representing the source language. tgt_lang (`str`, *optional*): A string representing the target language. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. language_codes (`str`, *optional*, defaults to `"m2m100"`): What language codes to use. Should be one of `"m2m100"` or `"wmt21"`. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. 
            The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be
            used, among other things, to set:

            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
                using forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.

    Examples:

    ```python
    >>> from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

    >>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    >>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="ro")
    >>> src_text = " UN Chief Says There Is No Military Solution in Syria"
    >>> tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"
    >>> model_inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
    >>> outputs = model(**model_inputs)  # should work
    ```"""

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        additional_special_tokens = kwargs.pop("additional_special_tokens", [])
        for lang_code in fairseq_language_code:
            token = self.get_lang_token(lang_code)
            # register every language-code token as an additional special token exactly once
            if token not in additional_special_tokens:
                additional_special_tokens.append(token)

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)

        self.num_madeup_words = num_madeup_words

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            additional_special_tokens=additional_special_tokens,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the decoder."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. An M2M100 sequence has the following format, where `X` represents the sequence:

        - `input_ids` (for encoder) `[src_lang_code] X [eos]`
        - `decoder_input_ids`: (for decoder) `[tgt_lang_code] X [eos]`

        BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
        separator.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
""" if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens def __getstate__(self) -> Dict: state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d: Dict) -> None: self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: save_dir = Path(save_directory) if not save_dir.is_dir(): raise OSError(f"{save_directory} should be a directory") vocab_save_path = save_dir / ( (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"] ) spm_save_path = save_dir / ( (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"] ) save_json(self.encoder, vocab_save_path) if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file): copyfile(self.spm_file, spm_save_path) elif not os.path.isfile(self.spm_file): with open(spm_save_path, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (str(vocab_save_path), str(spm_save_path)) def prepare_seq2seq_batch( self, src_texts: List[str], src_lang: str = "en", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro", **kwargs, ) -> BatchEncoding: self.src_lang = src_lang self.tgt_lang = tgt_lang self.set_src_lang_special_tokens(self.src_lang) return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs) def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs): """Used by translation pipeline, to prepare inputs for the generate function""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") self.src_lang = src_lang inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs) tgt_lang_id = self.get_lang_id(tgt_lang) inputs["forced_bos_token_id"] = tgt_lang_id return inputs def _switch_to_input_mode(self): self.set_src_lang_special_tokens(self.src_lang) def _switch_to_target_mode(self): self.set_tgt_lang_special_tokens(self.tgt_lang) def set_src_lang_special_tokens(self, src_lang: str) -> None: """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code].""" lang_token = self.get_lang_token(src_lang) self.cur_lang_id = self.lang_token_to_id[lang_token] self.prefix_tokens = [self.cur_lang_id] self.suffix_tokens = [self.eos_token_id] def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None: """Reset the special tokens to the target language setting. 
No prefix and suffix=[eos, tgt_lang_code].""" lang_token = self.get_lang_token(tgt_lang) self.cur_lang_id = self.lang_token_to_id[lang_token] self.prefix_tokens = [self.cur_lang_id] self.suffix_tokens = [self.eos_token_id] def get_lang_token(self, lang: str) -> str: return self.lang_code_to_token[lang] def get_lang_id(self, lang: str) -> int: lang_token = self.get_lang_token(lang) return self.lang_token_to_id[lang_token] def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor: spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs) spm.Load(str(path)) return spm def load_json(path: str) -> Union[Dict, List]: with open(path, "r") as f: return json.load(f) def save_json(data, path: str) -> None: with open(path, "w") as f: json.dump(data, f, indent=2)
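# A minimal sketch (assuming the public "facebook/m2m100_418M" checkpoint) of the
# special-token layout the tokenizer above produces: source sequences come out as
# [src_lang_code] X [eos], matching set_src_lang_special_tokens. Illustrative only.
from transformers import M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="ro")
enc = tokenizer("UN Chief says there is no military solution in Syria")
assert enc["input_ids"][0] == tokenizer.get_lang_id("en")  # language-code token first
assert enc["input_ids"][-1] == tokenizer.eos_token_id  # eos token last
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))  # ['__en__', ..., '</s>']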
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/t5/modeling_t5.py
# coding=utf-8
# Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch T5 model."""

import copy
import math
import os
import warnings
from typing import List, Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPastAndCrossAttentions,
    Seq2SeqLMOutput,
    Seq2SeqModelOutput,
    Seq2SeqQuestionAnsweringModelOutput,
    Seq2SeqSequenceClassifierOutput,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import ALL_LAYERNORM_LAYERS, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
    DUMMY_INPUTS,
    DUMMY_MASK,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_torch_fx_proxy,
    logging,
    replace_return_docstrings,
)
from ...utils.model_parallel_utils import assert_device_map, get_device_map
from .configuration_t5 import T5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"
_CHECKPOINT_FOR_DOC = "google-t5/t5-small"

####################################################
# This dict contains ids and associated url
# for the pretrained weights provided with the models
####################################################

from ..deprecated._archive_maps import T5_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402


####################################################
# This is a conversion method from TF 1.0 to PyTorch
# More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28
####################################################
def load_tf_weights_in_t5(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model."""
    try:
        import re

        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        tf_weights[name] = array

    for txt_name in names:
        name = txt_name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            tf_weights.pop(txt_name, None)
            continue
        if "_slot_" in name[-1]:
            logger.info(f"Skipping {'/'.join(name)}")
            tf_weights.pop(txt_name, None)
            continue
        pointer = model
        array = tf_weights[txt_name]

        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] in ["kernel", "scale", "embedding"]:
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "self_attention":
                pointer = getattr(pointer, "layer")
                pointer = pointer[0]
            elif scope_names[0] == "enc_dec_attention":
                pointer = getattr(pointer, "layer")
                pointer = pointer[1]
            elif scope_names[0] == "dense_relu_dense":
                pointer = getattr(pointer, "layer")
                pointer = pointer[2]
            elif scope_names[0] == "rms_norm":
                if hasattr(pointer, "layer_norm"):
                    pointer = getattr(pointer, "layer_norm")
                elif hasattr(pointer, "final_layer_norm"):
                    pointer = getattr(pointer, "final_layer_norm")
            elif scope_names[0] == "scale":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            elif scope_names[0] == "decoder" and name[1] == "logits":
                continue
            elif scope_names[0] == "logits":
                pointer = getattr(pointer, "lm_head")
            elif scope_names[0] == "wi" and len(scope_names) > 1 and scope_names[1].isdigit():
                pointer = getattr(pointer, f"wi_{scope_names[1]}")
                continue
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    logger.info(f"Skipping {'/'.join(name)}")
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if scope_names[0] not in ["kernel", "scale", "embedding"]:
            pointer = getattr(pointer, "weight")
        if scope_names[0] != "embedding":
            logger.info(f"Transposing numpy weight of shape {array.shape} for {name}")
            array = np.transpose(array)
        try:
            if pointer.shape != array.shape:
                raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        except ValueError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array.astype(np.float32))
        tf_weights.pop(txt_name, None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.")
    return model


####################################################
# PyTorch Models are constructed by sub-classing
# - torch.nn.Module for the layers and
# - PreTrainedModel for the models (itself a sub-class of nn.Module)
####################################################
PARALLELIZE_DOCSTRING = r"""
    This is an experimental feature and is subject to change at a moment's notice.

    Uses a device map to distribute attention modules of the model across several devices.
If no device map is given,
    it will evenly distribute blocks across all devices.

    Args:
        device_map (`Dict[int, list]`, optional, defaults to None):
            A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
            automatically mapped to the first device (for esoteric reasons). That means that the first device should
            have fewer attention modules mapped to it than other devices. For reference, the t5 models have the
            following number of attention modules:

                - google-t5/t5-small: 6
                - google-t5/t5-base: 12
                - google-t5/t5-large: 24
                - google-t5/t5-3b: 24
                - google-t5/t5-11b: 24

    Example:

    ```python
    # Here is an example of a device map on a machine with 4 GPUs using google-t5/t5-3b, which has a total of 24 attention modules:
    model = T5ForConditionalGeneration.from_pretrained("google-t5/t5-3b")
    device_map = {
        0: [0, 1, 2],
        1: [3, 4, 5, 6, 7, 8, 9],
        2: [10, 11, 12, 13, 14, 15, 16],
        3: [17, 18, 19, 20, 21, 22, 23],
    }
    model.parallelize(device_map)
    ```
"""
DEPARALLELIZE_DOCSTRING = r"""
    Moves the model to cpu from a model parallel state.

    Example:

    ```python
    # On a 4 GPU machine with google-t5/t5-3b:
    model = T5ForConditionalGeneration.from_pretrained("google-t5/t5-3b")
    device_map = {
        0: [0, 1, 2],
        1: [3, 4, 5, 6, 7, 8, 9],
        2: [10, 11, 12, 13, 14, 15, 16],
        3: [17, 18, 19, 20, 21, 22, 23],
    }
    model.parallelize(device_map)  # Splits the model across several devices
    model.deparallelize()  # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()
    ```
"""


class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. 
Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states try: from apex.normalization import FusedRMSNorm T5LayerNorm = FusedRMSNorm # noqa logger.info("Discovered apex.normalization.FusedRMSNorm - will use it instead of T5LayerNorm") except ImportError: # using the normal T5LayerNorm pass except Exception: logger.warning("discovered apex but it failed to load, falling back to T5LayerNorm") pass ALL_LAYERNORM_LAYERS.append(T5LayerNorm) class T5DenseActDense(nn.Module): def __init__(self, config: T5Config): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) if ( isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and self.wo.weight.dtype != torch.int8 ): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states class T5DenseGatedActDense(nn.Module): def __init__(self, config: T5Config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) # To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32. 
# See https://github.com/huggingface/transformers/issues/20287 # we also make sure the weights are not in `int8` in case users will force `_keep_in_fp32_modules` to be `None`` if ( isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and self.wo.weight.dtype != torch.int8 ): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states class T5LayerFF(nn.Module): def __init__(self, config: T5Config): super().__init__() if config.is_gated_act: self.DenseReluDense = T5DenseGatedActDense(config) else: self.DenseReluDense = T5DenseActDense(config) self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states class T5Attention(nn.Module): def __init__(self, config: T5Config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return values def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). """ # Input is (batch_size, seq_length, dim) # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is not None: if len(past_key_value) != 2: raise ValueError( f"past_key_value should have 2 past states: keys and values. 
Got { len(past_key_value)} past states" ) real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length key_length = real_seq_length if key_value_states is None else key_value_states.shape[1] def shape(states): """projection""" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) def unshape(states): """reshape""" return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) def project(hidden_states, proj_layer, key_value_states, past_key_value): """projects hidden states correctly to key/query states""" if key_value_states is None: # self-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(hidden_states)) elif past_key_value is None: # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states is None: # self-attn # (batch_size, n_heads, key_length, dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states], dim=2) elif past_key_value.shape[2] != key_value_states.shape[1]: # checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) else: # cross-attn hidden_states = past_key_value return hidden_states # get query states query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head) # get key/value states key_states = project( hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None ) value_states = project( hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None ) # compute scores scores = torch.matmul( query_states, key_states.transpose(3, 2) ) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 if position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device) # if key and values are already calculated # we want only the last query position bias if past_key_value is not None: position_bias = position_bias[:, :, -hidden_states.size(1) :, :] if mask is not None: position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length) if self.pruned_heads: mask = torch.ones(position_bias.shape[1]) mask[list(self.pruned_heads)] = 0 position_bias_masked = position_bias[:, mask.bool()] else: position_bias_masked = position_bias scores += position_bias_masked attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as( scores ) # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) # (batch_size, n_heads, seq_length, key_length) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim) attn_output = self.o(attn_output) present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None outputs = (attn_output,) + (present_key_value_state,) + 
(position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs class T5LayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.SelfAttention = T5Attention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs class T5LayerCrossAttention(nn.Module): def __init__(self, config): super().__init__() self.EncDecAttention = T5Attention(config, has_relative_attention_bias=False) self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add attentions if we output them return outputs class T5Block(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.layer = nn.ModuleList() self.layer.append(T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias)) if self.is_decoder: self.layer.append(T5LayerCrossAttention(config)) self.layer.append(T5LayerFF(config)) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ): if past_key_value is not None: if not self.is_decoder: logger.warning("`past_key_values` is passed to the encoder. Please make sure this is intended.") expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 if len(past_key_value) != expected_num_past_key_values: raise ValueError( f"There should be {expected_num_past_key_values} past states. " f"{'2 (key / value) for cross attention. 
' if expected_num_past_key_values == 4 else ''}" f"Got {len(past_key_value)} past key / value states" ) self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: # the actual query length is unknown for cross attention # if using past key value states. Need to inject it here if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) # Combine self attn and cross attn key value states if present_key_value_state is not None: present_key_value_state = present_key_value_state + cross_attention_outputs[1] # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if use_cache: outputs = outputs + (present_key_value_state,) + attention_outputs else: outputs = outputs + attention_outputs return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) class T5ClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config: T5Config): super().__init__() self.dense = nn.Linear(config.d_model, config.d_model) self.dropout = nn.Dropout(p=config.classifier_dropout) self.out_proj = nn.Linear(config.d_model, config.num_labels) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) 
hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states class T5PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = T5Config load_tf_weights = load_tf_weights_in_t5 base_model_prefix = "transformer" is_parallelizable = True supports_gradient_checkpointing = True _no_split_modules = ["T5Block"] _keep_in_fp32_modules = ["wo"] @property def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { "decoder_input_ids": input_ids, "input_ids": input_ids, "decoder_attention_mask": input_mask, } return dummy_inputs def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, T5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance( module, (T5Model, T5ForConditionalGeneration, T5EncoderModel, T5ForQuestionAnswering), ): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "lm_head") and not self.config.tie_word_embeddings: module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "qa_outputs"): module.qa_outputs.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) module.qa_outputs.bias.data.zero_() elif isinstance(module, T5ForTokenClassification): if hasattr(module, "classifier"): module.classifier.weight.data.normal_(mean=0.0, std=factor * 1.0) module.classifier.bias.data.zero_() elif isinstance(module, T5ClassificationHead): module.dense.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.dense, "bias") and module.dense.bias is not None: module.dense.bias.data.zero_() module.out_proj.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.out_proj, "bias") and module.out_proj.bias is not None: module.out_proj.bias.data.zero_() elif isinstance(module, T5DenseActDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, "bias") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, T5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, T5Attention): # Mesh TensorFlow attention initialization to avoid 
scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id if decoder_start_token_id is None: raise ValueError( "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. " "See T5 docs for more information." ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class T5Stack(T5PreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens self.is_decoder = config.is_decoder self.block = nn.ModuleList( [T5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)] ) self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Initialize weights and apply final processing self.post_init() # Model parallel self.model_parallel = False self.device_map = None self.gradient_checkpointing = False @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): warnings.warn( "`T5Stack.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model" " with `device_map='balanced'` in the call to `from_pretrained`. 
You can also provide your own" " `device_map` but it needs to be a dictionary module_name to device, so for instance {'block.0': 0," " 'block.1': 1, ...}", FutureWarning, ) # Check validity of device_map self.device_map = ( get_device_map(len(self.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.block)) self.model_parallel = True self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys())) self.last_device = "cuda:" + str(max(self.device_map.keys())) # Load onto devices for k, v in self.device_map.items(): for layer in v: cuda_device = "cuda:" + str(k) self.block[layer] = self.block[layer].to(cuda_device) # Set embed_tokens to first layer self.embed_tokens = self.embed_tokens.to(self.first_device) # Set final layer norm to last device self.final_layer_norm = self.final_layer_norm.to(self.last_device) @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): warnings.warn( "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) self.model_parallel = False self.device_map = None self.first_device = "cpu" self.last_device = "cpu" for i in range(len(self.block)): self.block[i] = self.block[i].to("cpu") self.embed_tokens = self.embed_tokens.to("cpu") self.final_layer_norm = self.final_layer_norm.to("cpu") torch.cuda.empty_cache() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): # Model parallel if self.model_parallel: torch.cuda.set_device(self.first_device) self.embed_tokens = self.embed_tokens.to(self.first_device) use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds") if inputs_embeds is None: if self.embed_tokens is None: raise ValueError("You have to initialize the model with valid token embeddings") inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask seq length can be calculated via length of past mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length if use_cache is True: if not self.is_decoder: raise ValueError(f"`use_cache` can only be set to `True` if {self} is used as a decoder") # initialize past_key_values with `None` if 
past does not exist if past_key_values is None: past_key_values = [None] * len(self.block) if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones( encoder_hidden_shape, device=inputs_embeds.device, dtype=torch.long ) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states = () if use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] # Model parallel if self.model_parallel: torch.cuda.set_device(hidden_states.device) # Ensure that attention_mask is always on the same device as hidden_states if attention_mask is not None: attention_mask = attention_mask.to(hidden_states.device) if position_bias is not None: position_bias = position_bias.to(hidden_states.device) if encoder_hidden_states is not None: encoder_hidden_states = encoder_hidden_states.to(hidden_states.device) if encoder_extended_attention_mask is not None: encoder_extended_attention_mask = encoder_extended_attention_mask.to(hidden_states.device) if encoder_decoder_position_bias is not None: encoder_decoder_position_bias = encoder_decoder_position_bias.to(hidden_states.device) if layer_head_mask is not None: layer_head_mask = layer_head_mask.to(hidden_states.device) if cross_attn_layer_head_mask is not None: cross_attn_layer_head_mask = cross_attn_layer_head_mask.to(hidden_states.device) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.forward, hidden_states, extended_attention_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value is always None with gradient checkpointing use_cache, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, 
encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is a tuple with: # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) if use_cache is False: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2] # We share the position biases between the layers - the first layer stores them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[2] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3] # append next layer key value states if use_cache: present_key_value_states = present_key_value_states + (present_key_value_state,) if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) # Model Parallel: If it's the last layer for that device, put things on the next device if self.model_parallel: for k, v in self.device_map.items(): if i == v[-1] and "cuda:" + str(k) != self.last_device: hidden_states = hidden_states.to("cuda:" + str(k + 1)) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) T5_START_DOCSTRING = r""" The T5 model was proposed in [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder-decoder transformer pre-trained in a text-to-text denoising generative setting. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads, etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior. Parameters: config ([`T5Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ T5_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary.
T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look at [T5 Training](./t5#training). attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5 Training](./t5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*): Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ T5_ENCODER_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. To know more on how to prepare `input_ids` for pretraining take a look at [T5 Training](./t5#training). attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
""" # Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG = """ The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently, `decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions. If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers, num_heads)`. """ @add_start_docstrings( "The bare T5 Model transformer outputting raw hidden-states without any specific head on top.", T5_START_DOCSTRING, ) class T5Model(T5PreTrainedModel): _keys_to_ignore_on_load_unexpected = [ "decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight", ] _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] def __init__(self, config: T5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = T5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = T5Stack(decoder_config, self.shared) # Initialize weights and apply final processing self.post_init() # Model parallel self.model_parallel = False self.device_map = None @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): warnings.warn( "`T5Model.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model" " with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own" " `device_map` but it needs to be a dictionary module_name to device, so for instance {'encoder.block.0':" " 0, 'encoder.block.1': 1, ...}", FutureWarning, ) self.device_map = ( get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.encoder.block)) self.encoder.parallelize(self.device_map) self.decoder.parallelize(self.device_map) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): warnings.warn( "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) self.encoder.deparallelize() self.decoder.deparallelize() self.encoder = self.encoder.to("cpu") self.decoder = self.decoder.to("cpu") self.model_parallel = False self.device_map = None torch.cuda.empty_cache() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads) @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, T5Model >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small") >>> model = T5Model.from_pretrained("google-t5/t5-small") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for T5Model. >>> # This is not needed for torch's T5ForConditionalGeneration as it does this internally using labels arg.
>>> decoder_input_ids = model._shift_right(decoder_input_ids) >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.decoder.first_device) hidden_states = hidden_states.to(self.decoder.first_device) if decoder_input_ids is not None: decoder_input_ids = decoder_input_ids.to(self.decoder.first_device) if attention_mask is not None: attention_mask = attention_mask.to(self.decoder.first_device) if decoder_attention_mask is not None: decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings("""T5 Model with a `language modeling` head on top.""", T5_START_DOCSTRING) class T5ForConditionalGeneration(T5PreTrainedModel): _keys_to_ignore_on_load_unexpected = [ "decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight", ] _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] def __init__(self, config: T5Config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = T5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True 
decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = T5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() # Model parallel self.model_parallel = False self.device_map = None @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): warnings.warn( "`T5ForConditionalGeneration.parallelize` is deprecated and will be removed in v5 of Transformers, you" " should load your model with `device_map='balanced'` in the call to `from_pretrained`. You can also" " provide your own `device_map` but it needs to be a dictionary module_name to device, so for instance" " {'encoder.block.0': 0, 'encoder.block.1': 1, ...}", FutureWarning, ) self.device_map = ( get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.encoder.block)) self.encoder.parallelize(self.device_map) self.decoder.parallelize(self.device_map) self.lm_head = self.lm_head.to(self.decoder.first_device) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): warnings.warn( "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) self.encoder.deparallelize() self.decoder.deparallelize() self.encoder = self.encoder.to("cpu") self.decoder = self.decoder.to("cpu") self.lm_head = self.lm_head.to("cpu") self.model_parallel = False self.device_map = None torch.cuda.empty_cache() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_output_embeddings(self): return self.lm_head def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the sequence-to-sequence language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`. Returns: Examples: ```python >>> from transformers import AutoTokenizer, T5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small") >>> model = T5ForConditionalGeneration.from_pretrained("google-t5/t5-small") >>> # training >>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids >>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids >>> outputs = model(input_ids=input_ids, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits >>> # inference >>> input_ids = tokenizer( ... "summarize: studies have shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> outputs = model.generate(input_ids) >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) >>> # studies have shown that owning a dog is good for you. ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: # Convert encoder inputs into embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if self.model_parallel: torch.cuda.set_device(self.decoder.first_device) if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.decoder.first_device) hidden_states = hidden_states.to(self.decoder.first_device) if decoder_input_ids is not None: decoder_input_ids = decoder_input_ids.to(self.decoder.first_device) if attention_mask is not None: attention_mask = attention_mask.to(self.decoder.first_device) if decoder_attention_mask is not None: decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.encoder.first_device) self.lm_head = 
self.lm_head.to(self.encoder.first_device) sequence_output = sequence_output.to(self.lm_head.weight.device) if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) # move labels to correct device to enable PP labels = labels.to(lm_logits.device) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, decoder_attention_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past_key_values is used if past_key_values is not None: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return { "decoder_input_ids": input_ids, "past_key_values": past_key_values, "encoder_outputs": encoder_outputs, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "decoder_attention_mask": decoder_attention_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) def _reorder_cache(self, past_key_values, beam_idx): # if decoder past is not included in output # speedy decoding is disabled and no need to reorder if past_key_values is None: logger.warning("You might want to consider setting `use_cache=True` to speed up decoding") return past_key_values reordered_decoder_past = () for layer_past_states in past_key_values: # get the correct batch idx from layer past batch dim # batch dim of `past` is at 2nd position reordered_layer_past_states = () for layer_past_state in layer_past_states: # need to set correct `past` for each of the four key / value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), ) if reordered_layer_past_states[0].shape != layer_past_states[0].shape: raise ValueError( f"reordered_layer_past_states[0] shape {reordered_layer_past_states[0].shape} and layer_past_states[0] shape {layer_past_states[0].shape} mismatched" ) if len(reordered_layer_past_states) != len(layer_past_states): raise ValueError( f"length of reordered_layer_past_states 
{len(reordered_layer_past_states)} and length of layer_past_states {len(layer_past_states)} mismatched" ) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past @add_start_docstrings( "The bare T5 Model transformer outputting encoder's raw hidden-states without any specific head on top.", T5_START_DOCSTRING, ) class T5EncoderModel(T5PreTrainedModel): _tied_weights_keys = ["encoder.embed_tokens.weight"] _keys_to_ignore_on_load_unexpected = [r"decoder"] def __init__(self, config: T5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = T5Stack(encoder_config, self.shared) # Initialize weights and apply final processing self.post_init() # Model parallel self.model_parallel = False self.device_map = None @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): warnings.warn( "`T5EncoderModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load" " your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own" " `device_map` but it needs to be a dictionary module_name to device, so for instance {'block.0': 0," " 'block.1': 1, ...}", FutureWarning, ) self.device_map = ( get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.encoder.block)) self.encoder.parallelize(self.device_map) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): warnings.warn( "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) self.encoder.deparallelize() self.encoder = self.encoder.to("cpu") self.model_parallel = False self.device_map = None torch.cuda.empty_cache() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads) @add_start_docstrings_to_model_forward(T5_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, T5EncoderModel >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small") >>> model = T5EncoderModel.from_pretrained("google-t5/t5-small") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... 
).input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return encoder_outputs @add_start_docstrings( """ T5 model with a sequence classification head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, T5_START_DOCSTRING, ) class T5ForSequenceClassification(T5PreTrainedModel): _keys_to_ignore_on_load_unexpected = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"] _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] def __init__(self, config: T5Config): super().__init__(config) self.transformer = T5Model(config) self.classification_head = T5ClassificationHead(config) # Initialize weights and apply final processing self.post_init() self.model_parallel = False @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Seq2SeqSequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if input_ids is None and inputs_embeds is not None: raise NotImplementedError( f"Passing input embeddings is currently not supported for {self.__class__.__name__}" ) # Copied from models.bart.modeling_bart.BartModel.forward different to other models, T5 automatically creates # decoder_input_ids from input_ids if no decoder_input_ids are provided if decoder_input_ids is None and decoder_inputs_embeds is None: if input_ids is None: raise ValueError( "If no `decoder_input_ids` or `decoder_inputs_embeds` are " "passed, `input_ids` cannot be `None`. Please pass either " "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`."
) decoder_input_ids = self._shift_right(input_ids) outputs = self.transformer( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] eos_mask = input_ids.eq(self.config.eos_token_id).to(sequence_output.device) if len(torch.unique_consecutive(eos_mask.sum(1))) > 1: raise ValueError("All examples must have the same number of <eos> tokens.") batch_size, _, hidden_size = sequence_output.shape sentence_representation = sequence_output[eos_mask, :].view(batch_size, -1, hidden_size)[:, -1, :] logits = self.classification_head(sentence_representation) loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.config.num_labels == 1: self.config.problem_type = "regression" elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.config.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return Seq2SeqSequenceClassifierOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( """ T5 Encoder Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", T5_START_DOCSTRING, ) class T5ForTokenClassification(T5PreTrainedModel): _tied_weights_keys = ["transformer.encoder.embed_tokens.weight"] def __init__(self, config: T5Config): super().__init__(config) self.num_labels = config.num_labels self.transformer = T5EncoderModel(config) self.dropout = nn.Dropout(config.classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer( input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits, outputs[2:-1]) return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ T5 Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", T5_START_DOCSTRING, ) class T5ForQuestionAnswering(T5PreTrainedModel): _keys_to_ignore_on_load_unexpected = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"] _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] def __init__(self, config: T5Config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = T5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = T5Stack(decoder_config, self.shared) self.num_labels = config.num_labels self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() self.model_parallel = False def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqQuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence are not taken into account for computing the loss. 
Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict use_cache = use_cache if use_cache is not None else self.config.use_cache if start_positions is not None and end_positions is not None: use_cache = False # Copied from models.bart.modeling_bart.BartModel.forward # different to other models, T5 automatically creates decoder_input_ids from # input_ids if no decoder_input_ids are provided if decoder_input_ids is None and decoder_inputs_embeds is None: if input_ids is None: raise ValueError( "If no `decoder_input_ids` or `decoder_inputs_embeds` are " "passed, `input_ids` cannot be `None`. Please pass either " "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`." ) decoder_input_ids = self._shift_right(input_ids) # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=None, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, splitting adds a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1).to(start_logits.device) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1).to(end_logits.device) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + decoder_outputs[1:] + encoder_outputs return ((total_loss,) + output) if total_loss is not None else output return Seq2SeqQuestionAnsweringModelOutput( loss=total_loss,
start_logits=start_logits, end_logits=end_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, )
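# --- Editor's note: the sketch below is an illustration added to this document, not part of the
# library. It shows how the span-classification head defined above is typically driven end to end.
# The checkpoint name is only an example, and its `qa_outputs` layer is randomly initialized unless
# a QA-fine-tuned checkpoint is loaded, so the extracted span is meaningless without fine-tuning.
#
# import torch
# from transformers import AutoTokenizer, T5ForQuestionAnswering
#
# tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
# model = T5ForQuestionAnswering.from_pretrained("google-t5/t5-small")
#
# question, context = "Who walks in the park?", "The cute dog walks in the park."
# inputs = tokenizer(question, context, return_tensors="pt")
# with torch.no_grad():
#     outputs = model(**inputs)  # decoder_input_ids are derived from input_ids internally
#
# # Pick the highest-scoring start/end positions and decode the span between them.
# start = outputs.start_logits.argmax(-1).item()
# end = outputs.end_logits.argmax(-1).item()
# answer = tokenizer.decode(inputs.input_ids[0, start : end + 1])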
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/t5/tokenization_t5_fast.py
# coding=utf-8 # Copyright 2018 T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization class for model T5.""" import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_t5 import T5Tokenizer else: T5Tokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} # TODO(PVP) - this should be removed in Transformers v5 class T5TokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" T5 tokenizer (backed by HuggingFace's *tokenizers* library). Based on [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. extra_ids (`int`, *optional*, defaults to 100): Add a number of extra ids to the vocabulary for use as sentinels. These tokens are accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. These tokens can be retrieved by calling the `get_sentinel_tokens` method and their token ids by calling the `get_sentinel_token_ids` method. additional_special_tokens (`List[str]`, *optional*): Additional special tokens used by the tokenizer. add_prefix_space (`bool`, *optional*): Whether or not the tokenizer should automatically add a prefix space. from_slow (`bool`, *optional*, defaults to `False`): Whether or not the tokenizer should be converted from a slow one. If `add_prefix_space` is set, this will be set to `True`.
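Example (an illustrative sketch added for this document; the exact ids depend on the checkpoint's vocabulary, but `</s>` is id 1 in the standard T5 vocabulary):

```python
>>> from transformers import T5TokenizerFast

>>> tokenizer = T5TokenizerFast.from_pretrained("google-t5/t5-small")
>>> ids = tokenizer("Studies show that").input_ids
>>> ids[-1] == tokenizer.eos_token_id  # an eos token is appended automatically
True
```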
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = T5Tokenizer prefix_tokens: List[int] = [] def __init__( self, vocab_file=None, tokenizer_file=None, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100, additional_special_tokens=None, add_prefix_space=None, **kwargs, ): # Add extra_ids to the special token list if additional_special_tokens is not None: extra_tokens = [x for x in additional_special_tokens if "<extra_id_" in str(x)] if len(extra_tokens) < 1: additional_special_tokens += [f"<extra_id_{i}>" for i in range(extra_ids)] elif extra_ids > 0 and extra_ids != len(extra_tokens): raise ValueError( f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids" " tokens" ) else: extra_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)] additional_special_tokens = extra_tokens if add_prefix_space is not None: logger.warning_once( "You set `add_prefix_space`. The tokenizer needs to be converted from the slow tokenizers" ) kwargs["from_slow"] = True super().__init__( vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs, ) self.vocab_file = vocab_file self._extra_ids = extra_ids @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False @staticmethod def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length): if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes: deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( "This tokenizer was incorrectly instantiated with a model max length of" f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this" " behavior is kept to avoid breaking backwards compatibility when padding/encoding with" " `truncation is True`.\n- Be aware that you SHOULD NOT rely on" f" {pretrained_model_name_or_path} automatically truncating your input to" f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences" f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with" " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please" " instantiate this tokenizer with `model_max_length` set to your preferred value.", FutureWarning, ) return max_model_length def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." 
) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) logger.info(f"Copied vocab file to {out_vocab_file}") return (out_vocab_file,) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and adding special tokens. A sequence has the following format: - single sequence: `X </s>` - pair of sequences: `A </s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ token_ids_0 = token_ids_0 + [self.eos_token_id] if token_ids_1 is None: return self.prefix_tokens + token_ids_0 else: token_ids_1 = token_ids_1 + [self.eos_token_id] return self.prefix_tokens + token_ids_0 + token_ids_1 def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ eos = [self.eos_token_id] if token_ids_1 is None: return len(token_ids_0 + eos) * [0] return len(token_ids_0 + eos + token_ids_1 + eos) * [0] def get_sentinel_tokens(self): return list( set(filter(lambda x: re.search(r"<extra_id_\d+>", x) is not None, self.additional_special_tokens)) ) def get_sentinel_token_ids(self): return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
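# --- Editor's note: an illustrative sketch (not part of the library) of the helpers defined above.
# The checkpoint name is an example; the ids in the comments assume the standard T5 vocabulary,
# where `</s>` has id 1 and there are 100 `<extra_id_*>` sentinel tokens.
#
# from transformers import T5TokenizerFast
#
# tokenizer = T5TokenizerFast.from_pretrained("google-t5/t5-small")
#
# # `build_inputs_with_special_tokens` appends the eos id to each sequence: `X </s>` / `A </s> B </s>`.
# tokenizer.build_inputs_with_special_tokens([100, 200])         # -> [100, 200, 1]
# tokenizer.build_inputs_with_special_tokens([100, 200], [300])  # -> [100, 200, 1, 300, 1]
#
# # The sentinel helpers collect the `<extra_id_*>` tokens used as span-corruption targets
# # (the returned list is built from a set, so its order is not guaranteed).
# sentinels = tokenizer.get_sentinel_tokens()        # 100 tokens, e.g. "<extra_id_0>" ... "<extra_id_99>"
# sentinel_ids = tokenizer.get_sentinel_token_ids()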
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/t5/configuration_t5.py
# coding=utf-8
# Copyright 2020, The T5 Authors and HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""T5 model configuration"""

from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import T5_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class T5Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`T5Model`] or a [`TFT5Model`]. It is used to
    instantiate a T5 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the T5
    [google-t5/t5-small](https://huggingface.co/google-t5/t5-small) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Arguments:
        vocab_size (`int`, *optional*, defaults to 32128):
            Vocabulary size of the T5 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`T5Model`] or [`TFT5Model`].
        d_model (`int`, *optional*, defaults to 512):
            Size of the encoder and decoder layers (the model's hidden size).
        d_kv (`int`, *optional*, defaults to 64):
            Size of the key, query, value projections per attention head. The `inner_dim` of the projection layer
            will be defined as `num_heads * d_kv`.
        d_ff (`int`, *optional*, defaults to 2048):
            Size of the intermediate feed forward layer in each `T5Block`.
        num_layers (`int`, *optional*, defaults to 6):
            Number of hidden layers in the Transformer encoder.
        num_decoder_layers (`int`, *optional*):
            Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
        num_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        relative_attention_num_buckets (`int`, *optional*, defaults to 32):
            The number of buckets to use for each attention layer.
        relative_attention_max_distance (`int`, *optional*, defaults to 128):
            The maximum distance of the longer sequences for the bucket separation.
        dropout_rate (`float`, *optional*, defaults to 0.1):
            The ratio for all dropout layers.
        classifier_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the classifier.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
            testing).
        feed_forward_proj (`string`, *optional*, defaults to `"relu"`):
            Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. T5v1.1 uses the
            `"gated-gelu"` feed forward projection. Original T5 uses `"relu"`.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
    """

    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        classifier_dropout=0.0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if (len(act_info) > 1 and act_info[0] != "gated") or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
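# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library file above): how the
# constructor parses `feed_forward_proj` into `is_gated_act` / `dense_act_fn`,
# which selects between the plain and gated feed-forward blocks.
# ---------------------------------------------------------------------------
from transformers import T5Config

# Plain activation: a single input projection (wi) followed by wo.
cfg = T5Config(feed_forward_proj="relu")
assert cfg.is_gated_act is False and cfg.dense_act_fn == "relu"

# Gated activation (T5v1.1): two input projections (wi_0, wi_1) whose outputs
# are multiplied; "gated-gelu" is special-cased to the "gelu_new" kernel.
cfg = T5Config(feed_forward_proj="gated-gelu")
assert cfg.is_gated_act is True and cfg.dense_act_fn == "gelu_new"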
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/t5/modeling_tf_t5.py
# coding=utf-8 # Copyright 2020 T5 Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 T5 model.""" from __future__ import annotations import copy import itertools import math import warnings from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from tensorflow.compiler.tf2xla.python.xla import dynamic_slice from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutput, TFBaseModelOutputWithPastAndCrossAttentions, TFSeq2SeqLMOutput, TFSeq2SeqModelOutput, ) from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_t5 import T5Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "T5Config" from ..deprecated._archive_maps import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 #################################################### # TF 2.0 Models are constructed using Keras imperative API by sub-classing # - keras.layers.Layer for the layers and # - TFPreTrainedModel for the models (it-self a sub-class of keras.Model) #################################################### class TFT5LayerNorm(keras.layers.Layer): def __init__(self, hidden_size, epsilon=1e-6, **kwargs): """ Construct a layernorm module in the T5 style No bias and no subtraction of mean. 
""" super().__init__(**kwargs) self.variance_epsilon = epsilon self.hidden_size = hidden_size def build(self, input_shape): """Build shared word embedding layer""" self.weight = self.add_weight("weight", shape=(self.hidden_size,), initializer="ones") super().build(input_shape) def call(self, hidden_states): variance = tf.math.reduce_mean(tf.math.square(hidden_states), axis=-1, keepdims=True) hidden_states = hidden_states * tf.math.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states class TFT5DenseActDense(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) wi_initializer = keras.initializers.RandomNormal( mean=0, stddev=config.initializer_factor * (config.d_model**-0.5) ) wo_initializer = keras.initializers.RandomNormal( mean=0, stddev=config.initializer_factor * (config.d_ff**-0.5) ) self.wi = keras.layers.Dense( config.d_ff, use_bias=False, name="wi", kernel_initializer=wi_initializer ) # Update init weights as in flax self.wo = keras.layers.Dense( config.d_model, use_bias=False, name="wo", kernel_initializer=wo_initializer ) # Update init weights as in flax self.dropout = keras.layers.Dropout(config.dropout_rate) self.act = get_tf_activation(config.dense_act_fn) self.config = config def call(self, hidden_states, training=False): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.wo(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "wi", None) is not None: with tf.name_scope(self.wi.name): self.wi.build([None, None, self.config.d_model]) if getattr(self, "wo", None) is not None: with tf.name_scope(self.wo.name): self.wo.build([None, None, self.config.d_ff]) class TFT5DenseGatedActDense(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) wi_initializer = keras.initializers.RandomNormal( mean=0, stddev=config.initializer_factor * (config.d_model**-0.5) ) wo_initializer = keras.initializers.RandomNormal( mean=0, stddev=config.initializer_factor * (config.d_ff**-0.5) ) self.wi_0 = keras.layers.Dense( config.d_ff, use_bias=False, name="wi_0", kernel_initializer=wi_initializer ) # Update init weights as in flax self.wi_1 = keras.layers.Dense( config.d_ff, use_bias=False, name="wi_1", kernel_initializer=wi_initializer ) # Update init weights as in flax self.wo = keras.layers.Dense( config.d_model, use_bias=False, name="wo", kernel_initializer=wo_initializer ) # Update init weights as in flax self.dropout = keras.layers.Dropout(config.dropout_rate) self.act = get_tf_activation(config.dense_act_fn) self.config = config def call(self, hidden_states, training=False): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.wo(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "wi_0", None) is not None: with tf.name_scope(self.wi_0.name): self.wi_0.build([None, None, self.config.d_model]) if getattr(self, "wi_1", None) is not None: with tf.name_scope(self.wi_1.name): self.wi_1.build([None, None, self.config.d_model]) if getattr(self, "wo", None) is not None: with tf.name_scope(self.wo.name): self.wo.build([None, None, self.config.d_ff]) class TFT5LayerFF(keras.layers.Layer): def 
__init__(self, config, **kwargs): super().__init__(**kwargs) if config.is_gated_act: self.DenseReluDense = TFT5DenseGatedActDense(config, name="DenseReluDense") else: self.DenseReluDense = TFT5DenseActDense(config, name="DenseReluDense") self.layer_norm = TFT5LayerNorm(config.d_model, epsilon=config.layer_norm_epsilon, name="layer_norm") self.dropout = keras.layers.Dropout(config.dropout_rate) def call(self, hidden_states, training=False): normed_hidden_states = self.layer_norm(hidden_states) dense_output = self.DenseReluDense(normed_hidden_states, training=training) hidden_states = hidden_states + self.dropout(dense_output, training=training) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build(None) if getattr(self, "DenseReluDense", None) is not None: with tf.name_scope(self.DenseReluDense.name): self.DenseReluDense.build(None) class TFT5Attention(keras.layers.Layer): NEW_ID = itertools.count() def __init__(self, config, has_relative_attention_bias=False, **kwargs): super().__init__(**kwargs) self.layer_id = next(TFT5Attention.NEW_ID) self.is_decoder = config.is_decoder self.use_cache = config.use_cache self.has_relative_attention_bias = has_relative_attention_bias self.output_attentions = config.output_attentions self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax q_initializer = keras.initializers.RandomNormal( mean=0, stddev=config.initializer_factor * ((self.inner_dim * self.key_value_proj_dim) ** -0.5) ) k_initializer = keras.initializers.RandomNormal( mean=0, stddev=config.initializer_factor * (self.inner_dim**-0.5) ) v_initializer = keras.initializers.RandomNormal( mean=0, stddev=config.initializer_factor * (self.inner_dim**-0.5) ) o_initializer = keras.initializers.RandomNormal( mean=0, stddev=config.initializer_factor * (self.inner_dim**-0.5) ) self.relative_attention_bias_initializer = keras.initializers.RandomNormal( mean=0, stddev=config.initializer_factor * (self.inner_dim**-0.5) ) self.q = keras.layers.Dense( self.inner_dim, use_bias=False, name="q", kernel_initializer=q_initializer ) # Update init weights as in flax self.k = keras.layers.Dense( self.inner_dim, use_bias=False, name="k", kernel_initializer=k_initializer ) # Update init weights as in flax self.v = keras.layers.Dense( self.inner_dim, use_bias=False, name="v", kernel_initializer=v_initializer ) # Update init weights as in flax self.o = keras.layers.Dense( self.d_model, use_bias=False, name="o", kernel_initializer=o_initializer ) # Update init weights as in flax self.dropout = keras.layers.Dropout(config.dropout_rate) self.pruned_heads = set() def build(self, input_shape=None): if self.built: return self.built = True if self.has_relative_attention_bias: with tf.name_scope("relative_attention_bias"): self.relative_attention_bias = self.add_weight( name="embeddings", shape=[self.relative_attention_num_buckets, self.n_heads], initializer=self.relative_attention_bias_initializer, # Add initializer ) if getattr(self, "q", None) is not None: with tf.name_scope(self.q.name): self.q.build([None, None, self.d_model]) if getattr(self, "k", None) is 
not None: with tf.name_scope(self.k.name): self.k.build([None, None, self.d_model]) if getattr(self, "v", None) is not None: with tf.name_scope(self.v.name): self.v.build([None, None, self.d_model]) if getattr(self, "o", None) is not None: with tf.name_scope(self.o.name): self.o.build([None, None, self.inner_dim]) def prune_heads(self, heads): raise NotImplementedError @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 # n = -relative_position if bidirectional: num_buckets //= 2 relative_buckets += ( tf.cast(tf.math.greater(relative_position, 0), dtype=relative_position.dtype) * num_buckets ) relative_position = tf.math.abs(relative_position) else: relative_position = -tf.math.minimum(relative_position, 0) # now n is in the range [0, inf) max_exact = num_buckets // 2 is_small = tf.math.less(relative_position, max_exact) relative_position_if_large = max_exact + tf.cast( tf.math.log(tf.cast(relative_position, tf.float32) / tf.cast(max_exact, tf.float32)) / math.log(max_distance / max_exact) * (num_buckets - max_exact), dtype=relative_position.dtype, ) relative_position_if_large = tf.math.minimum(relative_position_if_large, num_buckets - 1) relative_buckets += tf.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length): """Compute binned relative position bias""" context_position = tf.range(query_length)[:, None] memory_position = tf.range(key_length)[None, :] relative_position = memory_position - context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = tf.gather( self.relative_attention_bias, relative_position_bucket ) # shape (query_length, key_length, num_heads) values = tf.expand_dims( tf.transpose(values, [2, 0, 1]), axis=0 ) # shape (1, num_heads, query_length, key_length) return values def call( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, training=False, output_attentions=False, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). 
""" # Input is (batch_size, query_length, dim) # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length = shape_list(hidden_states)[:2] real_seq_length = seq_length if past_key_value is not None: assert ( len(past_key_value) == 2 ), f"past_key_value should have 2 past states: keys and values. Got {len(past_key_value)} past states" real_seq_length += shape_list(past_key_value[0])[2] if query_length is None else query_length key_length = real_seq_length if key_value_states is None else shape_list(key_value_states)[1] def shape(hidden_states): """projection""" return tf.transpose( tf.reshape(hidden_states, (batch_size, -1, self.n_heads, self.key_value_proj_dim)), perm=(0, 2, 1, 3) ) def unshape(hidden_states): """compute context""" return tf.reshape(tf.transpose(hidden_states, perm=(0, 2, 1, 3)), (batch_size, -1, self.inner_dim)) def project(hidden_states, proj_layer, key_value_states, past_key_value): """projects hidden states correctly to key/query states""" if key_value_states is None: # self-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(hidden_states)) elif past_key_value is None: # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states is None: # self-attn # (batch_size, n_heads, key_length, dim_per_head) hidden_states = tf.concat([past_key_value, hidden_states], axis=2) else: # cross-attn hidden_states = past_key_value return hidden_states # get query query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, query_length, dim_per_head) # get key/value key_states = project( hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None ) value_states = project( hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None ) # to cope with keras serialization if self.is_decoder and use_cache: present_key_value_state = (key_states, value_states) else: present_key_value_state = None scores = tf.einsum( "bnqd,bnkd->bnqk", query_states, key_states ) # (batch_size, n_heads, query_length, key_length) if position_bias is None: if not self.has_relative_attention_bias: position_bias = tf.zeros((1, self.n_heads, real_seq_length, key_length)) else: position_bias = self.compute_bias(real_seq_length, key_length) # if key and values are already calculated we want only the last query position bias if past_key_value is not None: if not self.has_relative_attention_bias: position_bias = position_bias[:, :, -seq_length:, :] else: # we might have a padded past structure, in which case we want to fetch the position bias slice # right after the most recently filled past index most_recently_filled_past_index = tf.reduce_max(tf.where(past_key_value[0][0, 0, :, 0] != 0.0)) position_bias = dynamic_slice( position_bias, (0, 0, most_recently_filled_past_index + 1, 0), (1, self.n_heads, seq_length, real_seq_length), ) if mask is not None: position_bias = tf.cast(position_bias, dtype=mask.dtype) position_bias = position_bias + mask # (batch_size, n_heads, query_length, key_length) scores += position_bias weights = stable_softmax(scores, axis=-1) # (batch_size, n_heads, query_length, key_length) weights = self.dropout(weights, training=training) # (batch_size, n_heads, query_length, key_length) # Mask heads if we want to if layer_head_mask is not None: 
tf.debugging.assert_equal( shape_list(layer_head_mask), [self.n_heads], message=( f"Head mask for a single layer should be of size {(self.n_heads)}, but is" f" {shape_list(layer_head_mask)}" ), ) weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * weights attn_output = tf.matmul(weights, value_states) # (batch_size, n_heads, query_length, dim_per_head) attn_output = self.o(unshape(attn_output)) outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (weights,) return outputs class TFT5LayerSelfAttention(keras.layers.Layer): def __init__(self, config, has_relative_attention_bias=False, **kwargs): super().__init__(**kwargs) self.SelfAttention = TFT5Attention( config, has_relative_attention_bias=has_relative_attention_bias, name="SelfAttention", ) self.layer_norm = TFT5LayerNorm(config.d_model, epsilon=config.layer_norm_epsilon, name="layer_norm") self.dropout = keras.layers.Dropout(config.dropout_rate) def call( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, training=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, training=training, ) hidden_states = hidden_states + self.dropout(attention_output[0], training=training) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "SelfAttention", None) is not None: with tf.name_scope(self.SelfAttention.name): self.SelfAttention.build(None) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build(None) class TFT5LayerCrossAttention(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.EncDecAttention = TFT5Attention( config, has_relative_attention_bias=False, name="EncDecAttention", ) self.layer_norm = TFT5LayerNorm(config.d_model, epsilon=config.layer_norm_epsilon, name="layer_norm") self.dropout = keras.layers.Dropout(config.dropout_rate) def call( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, query_length=None, use_cache=False, output_attentions=False, training=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, training=training, ) hidden_states = hidden_states + self.dropout(attention_output[0], training=training) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "EncDecAttention", None) is not None: with tf.name_scope(self.EncDecAttention.name): self.EncDecAttention.build(None) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build(None) class TFT5Block(keras.layers.Layer): def __init__(self, config, has_relative_attention_bias=False, 
**kwargs):
        super().__init__(**kwargs)
        self.is_decoder = config.is_decoder

        self.layer = []
        self.layer.append(
            TFT5LayerSelfAttention(
                config,
                has_relative_attention_bias=has_relative_attention_bias,
                name="layer_._0",
            )
        )
        if self.is_decoder:
            self.layer.append(
                TFT5LayerCrossAttention(
                    config,
                    name="layer_._1",
                )
            )
        self.layer.append(TFT5LayerFF(config, name=f"layer_._{len(self.layer)}"))

    def call(
        self,
        hidden_states,
        attention_mask=None,
        position_bias=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
        layer_head_mask=None,
        encoder_layer_head_mask=None,
        past_key_value=None,
        use_cache=False,
        output_attentions=False,
        training=False,
    ):
        if past_key_value is not None:
            assert self.is_decoder, "Only decoder can use `past_key_values`"
            expected_num_past_key_values = 2 if encoder_hidden_states is None else 4

            if len(past_key_value) != expected_num_past_key_values:
                raise ValueError(
                    f"There should be {expected_num_past_key_values} past states"
                    f"{', including 2 (key / value) for cross attention' if expected_num_past_key_values == 4 else ''}."
                    f" Got {len(past_key_value)} past key / value states."
                )

            self_attn_past_key_value = past_key_value[:2]
            cross_attn_past_key_value = past_key_value[2:]
        else:
            self_attn_past_key_value, cross_attn_past_key_value = None, None

        self_attention_outputs = self.layer[0](
            hidden_states,
            attention_mask=attention_mask,
            position_bias=position_bias,
            layer_head_mask=layer_head_mask,
            past_key_value=self_attn_past_key_value,
            use_cache=use_cache,
            output_attentions=output_attentions,
            training=training,
        )
        hidden_states, present_key_value_state = self_attention_outputs[:2]
        attention_outputs = self_attention_outputs[2:]  # Keep self-attention outputs and relative position weights

        if self.is_decoder and encoder_hidden_states is not None:
            # the actual query length is unknown for cross attention
            # if using past key value states, so it needs to be injected here
            if present_key_value_state is not None:
                query_length = shape_list(present_key_value_state[0])[2]
            else:
                query_length = None

            cross_attention_outputs = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                position_bias=encoder_decoder_position_bias,
                layer_head_mask=encoder_layer_head_mask,
                past_key_value=cross_attn_past_key_value,
                query_length=query_length,
                use_cache=use_cache,
                output_attentions=output_attentions,
                training=training,
            )
            hidden_states = cross_attention_outputs[0]

            # Combine self attn and cross attn key value states
            if present_key_value_state is not None:
                present_key_value_state = present_key_value_state + cross_attention_outputs[1]

            # Keep cross-attention outputs and relative position weights
            attention_outputs = attention_outputs + cross_attention_outputs[2:]

        # Apply Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, training=training)
        outputs = (hidden_states,)

        # Add attentions if we output them
        outputs = outputs + (present_key_value_state,) + attention_outputs
        return outputs  # hidden-states, present_key_value_states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        for layer_module in self.layer:
            if hasattr(layer_module, "name"):
                with tf.name_scope(layer_module.name):
                    layer_module.build(None)


####################################################
# The full model without a specific pretrained or finetuning head is
# provided as a keras.layers.Layer usually called "TFT5MainLayer"
####################################################
@keras_serializable
class TFT5MainLayer(keras.layers.Layer):
    config_class = T5Config

    def __init__(self, config, embed_tokens=None, **kwargs):
        super().__init__(**kwargs)

        self.config = config
        self.output_hidden_states = config.output_hidden_states
        self.output_attentions = config.output_attentions
        self.use_cache = config.use_cache

        self.embed_tokens = embed_tokens
        self.is_decoder = config.is_decoder

        self.num_hidden_layers = config.num_layers

        self.block = [
            TFT5Block(config, has_relative_attention_bias=bool(i == 0), name=f"block_._{i}")
            for i in range(config.num_layers)
        ]
        self.final_layer_norm = TFT5LayerNorm(
            config.d_model, epsilon=config.layer_norm_epsilon, name="final_layer_norm"
        )
        self.dropout = keras.layers.Dropout(config.dropout_rate)

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError  # Not implemented yet in the library for TF 2.0 models

    @unpack_inputs
    def call(
        self,
        input_ids=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        inputs_embeds=None,
        head_mask=None,
        encoder_head_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
    ) -> Tuple:
        if input_ids is not None and inputs_embeds is not None:
            err_msg_prefix = "decoder_" if self.is_decoder else ""
            raise ValueError(
                f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time"
            )
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
            input_ids = tf.reshape(input_ids, (-1, input_shape[-1]))
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            err_msg_prefix = "decoder_" if self.is_decoder else ""
            raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds")

        if inputs_embeds is None:
            assert
self.embed_tokens is not None, "You have to initialize the model with valid token embeddings" check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask seq length can be calculated via length of past mask_seq_length = ( shape_list(past_key_values[0][0])[2] + seq_length if past_key_values is not None else seq_length ) if attention_mask is None: attention_mask = tf.fill((batch_size, mask_seq_length), 1) if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None: encoder_seq_length = shape_list(encoder_hidden_states)[1] encoder_attention_mask = tf.fill((batch_size, encoder_seq_length), 1) # initialize past_key_values with `None` if past does not exist if past_key_values is None: past_key_values = [None] * len(self.block) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. attention_mask = tf.cast(attention_mask, dtype=inputs_embeds.dtype) num_dims_attention_mask = len(shape_list(attention_mask)) if num_dims_attention_mask == 3: extended_attention_mask = attention_mask[:, None, :, :] elif num_dims_attention_mask == 2: # Provided a padding mask of dimensions [batch_size, mask_seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length] if self.is_decoder: seq_ids = tf.range(mask_seq_length) causal_mask = tf.less_equal( tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)), seq_ids[None, :, None], ) causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype) extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] if past_key_values[0] is not None: extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :] else: extended_attention_mask = attention_mask[:, None, None, :] # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -1e9 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270
        # extended_attention_mask = tf.math.equal(extended_attention_mask,
        #                                         tf.transpose(extended_attention_mask, perm=(-1, -2)))
        extended_attention_mask = (1.0 - extended_attention_mask) * -1e9

        if self.is_decoder and encoder_attention_mask is not None:
            # If a 2D or 3D attention mask is provided for the cross-attention,
            # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
            encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype)
            num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask))
            if num_dims_encoder_attention_mask == 3:
                encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
            if num_dims_encoder_attention_mask == 2:
                encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

            # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
            # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270
            # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask,
            #                                                 tf.transpose(encoder_extended_attention_mask, perm=(-1, -2)))
            encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9
        else:
            encoder_extended_attention_mask = None

        present_key_value_states = () if use_cache and self.is_decoder else None
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and self.is_decoder) else None
        position_bias = None
        encoder_decoder_position_bias = None

        hidden_states = self.dropout(inputs_embeds, training=training)

        for idx, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states,
                attention_mask=extended_attention_mask,
                position_bias=position_bias,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
                encoder_decoder_position_bias=encoder_decoder_position_bias,
                layer_head_mask=head_mask[idx] if head_mask is not None else None,
                encoder_layer_head_mask=encoder_head_mask[idx] if encoder_head_mask is not None else None,
                past_key_value=past_key_value,
                use_cache=use_cache,
                output_attentions=output_attentions,
                training=training,
            )

            # layer_outputs is a tuple with:
            # hidden-states, key-value-states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)
            hidden_states, present_key_value_state = layer_outputs[:2]

            # We share the position biases between the layers - the first layer stores them
            position_bias = layer_outputs[2]

            if self.is_decoder and encoder_hidden_states is not None:
                encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]

            # append next layer key value states
            if present_key_value_state is not None and use_cache and self.is_decoder:
                present_key_value_states = present_key_value_states + (present_key_value_state,)

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[3],)
                if self.is_decoder:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[5],)

        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)

        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            outputs = (hidden_states,)
            # need to check if is decoder here as well for special cases when using keras compile
            if use_cache and self.is_decoder:
                outputs = outputs + (present_key_value_states,)
            if output_hidden_states:
                outputs = outputs + (all_hidden_states,)
            if output_attentions:
                outputs = outputs + (all_attentions,)
                if self.is_decoder:
                    outputs = outputs + (all_cross_attentions,)
            return outputs  # last-layer hidden state, (past_key_values), (all hidden states), (all attentions), (all_cross_attentions)

        if self.is_decoder:
            return TFBaseModelOutputWithPastAndCrossAttentions(
                last_hidden_state=hidden_states,
                past_key_values=present_key_value_states,
                hidden_states=all_hidden_states,
                attentions=all_attentions,
                cross_attentions=all_cross_attentions,
            )
        else:
            return TFBaseModelOutput(
                last_hidden_state=hidden_states,
                hidden_states=all_hidden_states,
                attentions=all_attentions,
            )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "final_layer_norm", None) is not None:
            with tf.name_scope(self.final_layer_norm.name):
                self.final_layer_norm.build(None)
        if getattr(self, "block", None) is not None:
            for layer in self.block:
                with tf.name_scope(layer.name):
                    layer.build(None)


####################################################
# TFT5PreTrainedModel is a sub-class of keras.Model
# which takes care of loading and saving pretrained weights
# and various common utilities.
# Here you just need to specify a few (self-explanatory)
# pointers for your model.
####################################################
class TFT5PreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = T5Config
    base_model_prefix = "transformer"
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
    _keys_to_ignore_on_load_unexpected = [r"decoder\Wblock[\W_0]+layer[\W_1]+EncDecAttention\Wrelative_attention_bias"]

    def get_input_embeddings(self):
        return self.shared

    def set_input_embeddings(self, value):
        self.shared = value
        self.encoder.embed_tokens = self.shared
        if hasattr(self, "decoder"):
            self.decoder.embed_tokens = self.shared

    def _shift_right(self, input_ids):
        decoder_start_token_id = self.config.decoder_start_token_id
        pad_token_id = self.config.pad_token_id

        assert decoder_start_token_id is not None, (
            "self.model.config.decoder_start_token_id has to be defined. In TF T5 it is usually set to the"
            " pad_token_id. See T5 docs for more information"
        )

        start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id)
        start_tokens = tf.cast(start_tokens, input_ids.dtype)  # Ensure compatible dtypes for concatenation
        shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)

        assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id` shifted_input_ids = tf.where( shifted_input_ids == -100, tf.cast(tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids.dtype), shifted_input_ids, ) # "Verify that `labels` has only positive values and -100" assert_gte0 = tf.debugging.assert_greater_equal( shifted_input_ids, tf.constant(0, dtype=shifted_input_ids.dtype) ) # Make sure the assertion op is called by wrapping the result in an identity no-op with tf.control_dependencies([assert_gte0]): shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids T5_START_DOCSTRING = r""" The T5 model was proposed in [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder decoder transformer pre-trained in a text-to-text denoising generative setting. This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`T5Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ T5_INPUTS_DOCSTRING = r""" Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on the right or the left. 
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `inputs` for pretraining take a look at [T5 Training](./t5#training). decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): Provide for sequence to sequence training. T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5 Training](./t5#training). attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(tf.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(tf.Tensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`tf.Tensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). 
This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ T5_ENCODER_INPUTS_DOCSTRING = r""" Args: inputs (`tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on the right or the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. To know more on how to prepare `inputs` for pre-training take a look at [T5 Training](./t5#training). attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). 
""" _HEAD_MASK_WARNING_MSG = """ The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently, `decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions. If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = tf.ones((num_layers, num_heads))`. """ @add_start_docstrings( "The bare T5 Model transformer outputting raw hidden-stateswithout any specific head on top.", T5_START_DOCSTRING, ) class TFT5Model(TFT5PreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.shared = keras.layers.Embedding( input_dim=config.vocab_size, output_dim=config.d_model, embeddings_initializer=keras.initializers.TruncatedNormal(self.config.initializer_factor), name="shared", ) # Additional attribute to specify the expected name scope of the layer (for loading/storing weights) self.shared.load_weight_prefix = "shared" encoder_config = copy.deepcopy(config) encoder_config.use_cache = False self.encoder = TFT5MainLayer(encoder_config, self.shared, name="encoder") decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.num_layers = config.num_decoder_layers self.decoder = TFT5MainLayer(decoder_config, self.shared, name="decoder") def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @unpack_inputs @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, decoder_input_ids: np.ndarray | tf.Tensor | None = None, decoder_attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, decoder_head_mask: np.ndarray | tf.Tensor | None = None, encoder_outputs: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[Tuple, TFSeq2SeqModelOutput]: r""" Returns: Examples: ```python >>> from transformers import AutoTokenizer, TFT5Model >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small") >>> model = TFT5Model.from_pretrained("google-t5/t5-small") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="tf" ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="tf").input_ids # Batch size 1 >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for T5Model. >>> # This is not needed for torch's T5ForConditionalGeneration as it does this internally using labels arg. 
>>> decoder_input_ids = model._shift_right(decoder_input_ids) >>> # forward pass >>> outputs = model(input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: warnings.warn(_HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids, attention_mask=attention_mask, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=inputs_embeds, head_mask=head_mask, past_key_values=None, use_cache=False, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) hidden_states = encoder_outputs[0] # Decode decoder_outputs = self.decoder( decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, inputs_embeds=decoder_inputs_embeds, head_mask=decoder_head_mask, encoder_head_mask=head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) past = decoder_outputs[1] if use_cache else None if not return_dict: if past_key_values is not None: decoder_outputs = decoder_outputs[:1] + (past,) + decoder_outputs[2:] return decoder_outputs + encoder_outputs return TFSeq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=past, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True # The shared/tied weights expect to be in the model base namespace # Adding "/" to the end (not the start!) of a tf.name_scope puts it in the root namespace rather than # the current one. 
with tf.name_scope(self.shared.load_weight_prefix + "/" + self.shared.name + "/"): self.shared.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "decoder", None) is not None: with tf.name_scope(self.decoder.name): self.decoder.build(None) @add_start_docstrings("""T5 Model with a `language modeling` head on top.""", T5_START_DOCSTRING) class TFT5ForConditionalGeneration(TFT5PreTrainedModel, TFCausalLanguageModelingLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.model_dim = config.d_model self.shared = keras.layers.Embedding( config.vocab_size, config.d_model, name="shared", embeddings_initializer=get_initializer(self.config.initializer_factor), ) # Additional attribute to specify the expected name scope of the layer (for loading/storing weights) self.shared.load_weight_prefix = "shared" encoder_config = copy.deepcopy(config) encoder_config.use_cache = False self.encoder = TFT5MainLayer(encoder_config, self.shared, name="encoder") decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.num_layers = config.num_decoder_layers self.decoder = TFT5MainLayer(decoder_config, self.shared, name="decoder") if not config.tie_word_embeddings: lm_head_initializer = keras.initializers.RandomNormal(mean=0, stddev=config.initializer_factor) self.lm_head = keras.layers.Dense( config.vocab_size, use_bias=False, name="lm_head", kernel_initializer=lm_head_initializer ) # Update init weights as in flax self.config = config def get_output_embeddings(self): if self.config.tie_word_embeddings: return self.get_input_embeddings() else: # in a dense layer the kernel has a shape (last_dim, units), for us (dim, num_tokens) # value has a shape (num_tokens, dim) then needs to be transposed return tf.transpose(self.lm_head.kernel) def set_output_embeddings(self, value): if self.config.tie_word_embeddings: self.set_input_embeddings(value) else: lm_head_initializer = keras.initializers.RandomNormal(mean=0, stddev=self.config.initializer_factor) self.lm_head = keras.layers.Dense( shape_list(value)[0], use_bias=False, name="lm_head", kernel_initializer=lm_head_initializer ) # Update init weights as in flax # in a dense layer the kernel has a shape (last_dim, units), for us (dim, num_tokens) # value has a shape (num_tokens, dim) then needs to be transposed transposed_value = tf.transpose(value) self.lm_head.kernel = transposed_value def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @unpack_inputs @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, decoder_input_ids: np.ndarray | tf.Tensor | None = None, decoder_attention_mask: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, decoder_head_mask: np.ndarray | tf.Tensor | None = None, encoder_outputs: np.ndarray | tf.Tensor | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None, labels: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, 
training: Optional[bool] = False, ) -> Union[Tuple, TFSeq2SeqLMOutput]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the cross entropy classification loss. Indices should be in `[0, ..., config.vocab_size - 1]`. Returns: Examples: ```python >>> from transformers import AutoTokenizer, TFT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small") >>> model = TFT5ForConditionalGeneration.from_pretrained("google-t5/t5-small") >>> # training >>> inputs = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="tf").input_ids >>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="tf").input_ids >>> outputs = model(inputs, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits >>> # inference >>> inputs = tokenizer( ... "summarize: studies have shown that owning a dog is good for you", return_tensors="tf" ... ).input_ids # Batch size 1 >>> outputs = model.generate(inputs) >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) >>> # studies have shown that owning a dog is good for you ```""" # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: warnings.warn(_HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs = self.decoder( decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, inputs_embeds=decoder_inputs_embeds, head_mask=decoder_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = decoder_outputs[0] # T5v1.1 does not tie output word embeddings and thus does not require downscaling if self.config.tie_word_embeddings: sequence_output = sequence_output * (self.model_dim**-0.5) logits = tf.matmul(sequence_output, self.shared.weights, transpose_b=True) else: logits = self.lm_head(sequence_output) logits = tf.cast(logits, tf.float32) loss = None if labels is None else self.hf_compute_loss(labels, logits) past = decoder_outputs[1] if use_cache else None if not return_dict: if past_key_values is not None: decoder_outputs = decoder_outputs[:1] + (past,) + decoder_outputs[2:] output = (logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss is not None else output # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True elif isinstance(encoder_outputs, tuple): last_hidden_state = encoder_outputs[0] hidden_states = None attentions = None idx = 0 if output_hidden_states: idx += 1 hidden_states = encoder_outputs[idx] if output_attentions: idx += 1 attentions = encoder_outputs[idx] encoder_outputs = TFBaseModelOutput( last_hidden_state=last_hidden_state, 
hidden_states=hidden_states, attentions=attentions, ) return TFSeq2SeqLMOutput( loss=loss, logits=logits, past_key_values=past, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def serving_output(self, output): pkv = tf.convert_to_tensor(output.past_key_values[1:]) if self.config.use_cache else None dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None return TFSeq2SeqLMOutput( logits=output.logits, past_key_values=pkv, decoder_hidden_states=dec_hs, decoder_attentions=dec_attns, cross_attentions=cross_attns, encoder_last_hidden_state=output.encoder_last_hidden_state, encoder_hidden_states=enc_hs, encoder_attentions=enc_attns, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past is used if past_key_values is not None: input_ids = input_ids[:, -1:] return { "input_ids": None, # needs to be passed to make Keras.layer.__call__ happy "decoder_input_ids": input_ids, "past_key_values": past_key_values, "encoder_outputs": encoder_outputs, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "use_cache": use_cache, } def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor): return self._shift_right(labels) def build(self, input_shape=None): if self.built: return self.built = True # The shared/tied weights expect to be in the model base namespace # Adding "/" to the end (not the start!) of a tf.name_scope puts it in the root namespace rather than # the current one. 
        with tf.name_scope(self.shared.load_weight_prefix + "/" + self.shared.name + "/"):
            self.shared.build(None)
        if getattr(self, "encoder", None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)
        if getattr(self, "decoder", None) is not None:
            with tf.name_scope(self.decoder.name):
                self.decoder.build(None)
        if getattr(self, "lm_head", None) is not None:
            with tf.name_scope(self.lm_head.name):
                self.lm_head.build([None, None, self.config.d_model])


@add_start_docstrings(
    "The bare T5 Model transformer outputting encoder's raw hidden-states without any specific head on top.",
    T5_START_DOCSTRING,
)
class TFT5EncoderModel(TFT5PreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.shared = keras.layers.Embedding(
            config.vocab_size,
            config.d_model,
            name="shared",
            embeddings_initializer=get_initializer(self.config.initializer_factor),
        )
        # Additional attribute to specify the expected name scope of the layer (for loading/storing weights)
        self.shared.load_weight_prefix = "shared"

        encoder_config = copy.deepcopy(config)
        encoder_config.use_cache = False
        self.encoder = TFT5MainLayer(encoder_config, self.shared, name="encoder")

    def get_encoder(self):
        return self.encoder

    @unpack_inputs
    @add_start_docstrings_to_model_forward(T5_ENCODER_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[Tuple, TFBaseModelOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, TFT5EncoderModel

        >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
        >>> model = TFT5EncoderModel.from_pretrained("google-t5/t5-small")

        >>> input_ids = tokenizer(
        ...     "Studies have been shown that owning a dog is good for you", return_tensors="tf"
        ... ).input_ids  # Batch size 1

        >>> outputs = model(input_ids)
        ```"""

        encoder_outputs = self.encoder(
            input_ids,
            attention_mask=attention_mask,
            encoder_hidden_states=None,
            encoder_attention_mask=None,
            inputs_embeds=inputs_embeds,
            head_mask=head_mask,
            past_key_values=None,
            use_cache=False,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        if not return_dict:
            return encoder_outputs

        return TFBaseModelOutput(
            last_hidden_state=encoder_outputs.last_hidden_state,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # The shared/tied weights expect to be in the model base namespace
        # Adding "/" to the end (not the start!) of a tf.name_scope puts it in the root namespace rather than
        # the current one.
        with tf.name_scope(self.shared.load_weight_prefix + "/" + self.shared.name + "/"):
            self.shared.build(None)
        if getattr(self, "encoder", None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)
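

# A minimal sketch (illustrative only, not part of the original module) of the
# logit computation in TFT5ForConditionalGeneration.call above when word
# embeddings are tied: decoder states are rescaled by d_model**-0.5 before
# being projected onto the shared embedding matrix.
#
#   import tensorflow as tf
#
#   d_model, vocab_size = 512, 32128                  # t5-small sizes
#   sequence_output = tf.random.normal((3, d_model))  # 3 decoder positions
#   shared = tf.random.normal((vocab_size, d_model))  # tied embedding matrix
#   logits = tf.matmul(sequence_output * d_model**-0.5, shared, transpose_b=True)
#   print(logits.shape)  # (3, 32128)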
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/t5/download_from_gcp.sh
#!/usr/bin/env bash

# Use this script as follows: ./download_from_gcp.sh /path/to/folder/to/store/downloads
folder_to_store_downloads=${1}

# Replace gcp_path with the path to the T5 cloud bucket folder.
# To download the official `t5-small` model of https://github.com/google-research/text-to-text-transfer-transformer#released-model-checkpoints:
gcp_path="gs://t5-data/pretrained_models/small"

# Number of files the checkpoint is split into
num_of_checks=16

# Create dir if it does not exist
mkdir -p ${folder_to_store_downloads}

# Copy all meta information files
gsutil cp "${gcp_path}/operative_config.gin" ${folder_to_store_downloads}
gsutil cp "${gcp_path}/checkpoint" ${folder_to_store_downloads}
gsutil cp "${gcp_path}/model.ckpt-1000000.index" ${folder_to_store_downloads}
gsutil cp "${gcp_path}/model.ckpt-1000000.meta" ${folder_to_store_downloads}

# Copy all model weights
# single digit shard indices (0-9)
for ((i = 0 ; i < 10 && i < ${num_of_checks} ; i++)); do
  gsutil cp "${gcp_path}/model.ckpt-1000000.data-0000${i}-of-000${num_of_checks}" ${folder_to_store_downloads}
done

# double digit shard indices (10 and up)
for ((i = 10 ; i < ${num_of_checks} ; i++)); do
  gsutil cp "${gcp_path}/model.ckpt-1000000.data-000${i}-of-000${num_of_checks}" ${folder_to_store_downloads}
done

# Having run this script, you should create a suitable config.json, *e.g.* by
# looking at `https://huggingface.co/t5-small`.
# Then you can run `python convert_t5_original_tf_checkpoint_to_pytorch.py --tf_checkpoint_path "${folder_to_store_downloads}" --config_file "config.json" --pytorch_dump_path "/path/to/store/pytorch/weights"`
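
# Illustrative expected result (assuming the default 16-way split): the target
# folder should end up containing operative_config.gin, checkpoint,
# model.ckpt-1000000.index, model.ckpt-1000000.meta and the weight shards
#   model.ckpt-1000000.data-00000-of-00016
#   ...
#   model.ckpt-1000000.data-00015-of-00016
# Shard indices are zero-padded to five digits, which is why the two loops
# above pad single- and double-digit indices differently.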
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/t5/__init__.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _import_structure = {"configuration_t5": ["T5_PRETRAINED_CONFIG_ARCHIVE_MAP", "T5Config", "T5OnnxConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_t5"] = ["T5Tokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_t5_fast"] = ["T5TokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_t5"] = [ "T5_PRETRAINED_MODEL_ARCHIVE_LIST", "T5EncoderModel", "T5ForConditionalGeneration", "T5Model", "T5PreTrainedModel", "load_tf_weights_in_t5", "T5ForQuestionAnswering", "T5ForSequenceClassification", "T5ForTokenClassification", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tf_t5"] = [ "TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST", "TFT5EncoderModel", "TFT5ForConditionalGeneration", "TFT5Model", "TFT5PreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_flax_t5"] = [ "FlaxT5EncoderModel", "FlaxT5ForConditionalGeneration", "FlaxT5Model", "FlaxT5PreTrainedModel", ] if TYPE_CHECKING: from .configuration_t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config, T5OnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_t5 import T5Tokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_t5_fast import T5TokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_t5 import ( T5_PRETRAINED_MODEL_ARCHIVE_LIST, T5EncoderModel, T5ForConditionalGeneration, T5ForQuestionAnswering, T5ForSequenceClassification, T5ForTokenClassification, T5Model, T5PreTrainedModel, load_tf_weights_in_t5, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_t5 import ( TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST, TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model, TFT5PreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_t5 import ( FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model, 
FlaxT5PreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
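

# Note (illustrative, not part of the original module): the _LazyModule
# indirection above keeps `import transformers.models.t5` cheap; a submodule
# such as modeling_t5 is only imported when one of its attributes is first
# accessed, e.g.:
#
#   from transformers.models.t5 import T5Config  # only pulls in configuration_t5
#   from transformers.models.t5 import T5Model   # now triggers modeling_t5 (and torch)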
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/t5/convert_t5x_checkpoint_to_flax.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert T5X checkpoints from the original repository to JAX/FLAX model.""" import argparse from t5x import checkpoints from transformers import FlaxT5ForConditionalGeneration, T5Config def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path): config = T5Config.from_pretrained(config_name) flax_model = FlaxT5ForConditionalGeneration(config=config) t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path) split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"] # Encoder for layer_index in range(config.num_layers): layer_name = f"layers_{str(layer_index)}" # Self-Attention t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"] t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"] t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"] t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"] # Layer Normalization t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"] if split_mlp_wi: t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"] t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"] else: t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"] t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"] # Layer Normalization t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"] # Assigning flax_model.params["encoder"]["block"][str(layer_index)]["layer"]["0"]["SelfAttention"]["k"][ "kernel" ] = t5x_attention_key flax_model.params["encoder"]["block"][str(layer_index)]["layer"]["0"]["SelfAttention"]["o"][ "kernel" ] = t5x_attention_out flax_model.params["encoder"]["block"][str(layer_index)]["layer"]["0"]["SelfAttention"]["q"][ "kernel" ] = t5x_attention_query flax_model.params["encoder"]["block"][str(layer_index)]["layer"]["0"]["SelfAttention"]["v"][ "kernel" ] = t5x_attention_value flax_model.params["encoder"]["block"][str(layer_index)]["layer"]["0"]["layer_norm"][ "weight" ] = t5x_attention_layer_norm if split_mlp_wi: flax_model.params["encoder"]["block"][str(layer_index)]["layer"]["1"]["DenseReluDense"]["wi_0"][ "kernel" ] = t5x_mlp_wi_0 flax_model.params["encoder"]["block"][str(layer_index)]["layer"]["1"]["DenseReluDense"]["wi_1"][ "kernel" ] = t5x_mlp_wi_1 else: flax_model.params["encoder"]["block"][str(layer_index)]["layer"]["1"]["DenseReluDense"]["wi"][ "kernel" ] = t5x_mlp_wi flax_model.params["encoder"]["block"][str(layer_index)]["layer"]["1"]["DenseReluDense"]["wo"][ "kernel" ] = t5x_mlp_wo flax_model.params["encoder"]["block"][str(layer_index)]["layer"]["1"]["layer_norm"][ "weight" ] = t5x_mlp_layer_norm # Only for layer 0: t5x_encoder_rel_embedding = 
t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T flax_model.params["encoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][ "embedding" ] = t5x_encoder_rel_embedding # Assigning t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"] flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm # Decoder for layer_index in range(config.num_decoder_layers): layer_name = f"layers_{str(layer_index)}" # Self-Attention t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"] t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"] t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"] t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"] # Layer Normalization t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][ "scale" ] # Encoder-Decoder-Attention t5x_enc_dec_attention_key = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]["key"][ "kernel" ] t5x_enc_dec_attention_out = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]["out"][ "kernel" ] t5x_enc_dec_attention_query = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]["query"][ "kernel" ] t5x_enc_dec_attention_value = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]["value"][ "kernel" ] # Layer Normalization t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"] # MLP if split_mlp_wi: t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"] t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"] else: t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"] t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"] # Layer Normalization tx5_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"] # Assigning flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["0"]["SelfAttention"]["k"][ "kernel" ] = t5x_attention_key flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["0"]["SelfAttention"]["o"][ "kernel" ] = t5x_attention_out flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["0"]["SelfAttention"]["q"][ "kernel" ] = t5x_attention_query flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["0"]["SelfAttention"]["v"][ "kernel" ] = t5x_attention_value flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["0"]["layer_norm"][ "weight" ] = t5x_pre_attention_layer_norm flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["1"]["EncDecAttention"]["k"][ "kernel" ] = t5x_enc_dec_attention_key flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["1"]["EncDecAttention"]["o"][ "kernel" ] = t5x_enc_dec_attention_out flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["1"]["EncDecAttention"]["q"][ "kernel" ] = t5x_enc_dec_attention_query flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["1"]["EncDecAttention"]["v"][ "kernel" ] = t5x_enc_dec_attention_value flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["1"]["layer_norm"][ "weight" ] = t5x_cross_layer_norm if split_mlp_wi: 
flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["2"]["DenseReluDense"]["wi_0"][ "kernel" ] = t5x_mlp_wi_0 flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["2"]["DenseReluDense"]["wi_1"][ "kernel" ] = t5x_mlp_wi_1 else: flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["2"]["DenseReluDense"]["wi"][ "kernel" ] = t5x_mlp_wi flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["2"]["DenseReluDense"]["wo"][ "kernel" ] = t5x_mlp_wo flax_model.params["decoder"]["block"][str(layer_index)]["layer"]["2"]["layer_norm"][ "weight" ] = tx5_mlp_layer_norm # Decoder Normalization tx5_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"] flax_model.params["decoder"]["final_layer_norm"]["weight"] = tx5_decoder_norm # Only for layer 0: t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][ "embedding" ] = t5x_decoder_rel_embedding # Token Embeddings tx5_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"] flax_model.params["shared"]["embedding"] = tx5_token_embeddings # LM Head (only in v1.1 checkpoints) if "logits_dense" in t5x_model["target"]["decoder"]: flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"] flax_model.save_pretrained(flax_dump_folder_path) print("T5X Model was sucessfully converted!") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the TX5 checkpoint." ) parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of T5 model.") parser.add_argument( "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model." ) args = parser.parse_args() convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/t5/convert_t5x_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2022 Google LLC and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Convert T5X checkpoint to PyTorch Steps: - Install gsutil according to https://cloud.google.com/storage/docs/gsutil_install - Get a T5X checkpoint at https://github.com/google-research/t5x/blob/main/docs/models.md#t5-11-checkpoints Example: `gsutil -m cp -r gs://t5-data/pretrained_models/t5x/t5_1_1_small $HOME/` - Create or download a corresponding config for the downloaded model. E.g. for T5 v1.1 small, you can use https://huggingface.co/google/t5-v1_1-small/blob/main/config.json - Convert: ``` python3 convert_t5x_checkpoint_to_pytorch.py --t5x_checkpoint_path=$HOME/t5_1_1_small --config_file=config.json\ --pytorch_dump_path=$HOME/t5_1_1_small_pt ``` """ import argparse import collections import torch from flax import traverse_util from t5x import checkpoints from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def t5x_attention_lookup(params, i, prefix, layer_name="attention"): """Returns the KOQV parameters of (self-)attention. Does not transpose.""" k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"] o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"] q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"] v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"] return k, o, q, v def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False): """Returns the MLP parameters of a layer. Does not transpose.""" if split_mlp_wi: wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"] wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"] wi = (wi_0, wi_1) else: wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"] wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"] return wi, wo def t5x_layer_norm_lookup(params, i, prefix, layer_name): """Returns the layer norm param of a layer.""" return params[f"{prefix}/layers_{i}/{layer_name}/scale"] def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, num_decoder_layers: int, is_encoder_only: bool): """Converts the parameters from T5X-Flax to Transformers-PyTorch.""" old = traverse_util.flatten_dict(variables["target"]) old = {"/".join(k): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old print("Split MLP:", split_mlp_wi) new = collections.OrderedDict() # Shared embeddings. new["shared.weight"] = old["token_embedder/embedding"] # Encoder. for i in range(num_layers): # Block i, layer 0 (Self Attention). 
layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm") k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention") new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T # Block i, layer 1 (MLP). layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm") wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi) new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm if split_mlp_wi: new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T else: new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[ "encoder/relpos_bias/rel_embedding" ].T new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"] if not is_encoder_only: # Decoder. for i in range(num_decoder_layers): # Block i, layer 0 (Self Attention). layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm") k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention") new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T # Block i, layer 1 (Cross Attention). layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm") k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention") new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T # Block i, layer 2 (MLP). layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm") wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi) new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm if split_mlp_wi: new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T else: new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"] new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[ "decoder/relpos_bias/rel_embedding" ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T return new def make_state_dict(converted_params, is_encoder_only: bool): """Prepares a state dict for the PyTorch model.""" # Make a state dict with torch tensors. state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()]) # Add what is missing. 
if "encoder.embed_tokens.weight" not in state_dict: state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("Using shared word embeddings as lm_head.") state_dict["lm_head.weight"] = state_dict["shared.weight"] return state_dict def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only): """Replaces the params in model witht the T5X converted params.""" variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path) converted = convert_t5x_to_pytorch( variables, num_layers=config.num_layers, num_decoder_layers=config.num_decoder_layers, is_encoder_only=is_encoder_only, ) state_dict = make_state_dict(converted, is_encoder_only) model.load_state_dict(state_dict, strict=True) def convert_t5x_checkpoint_to_pytorch( t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False ): """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint.""" # Initialise PyTorch model config = T5Config.from_json_file(config_file) print(f"Building PyTorch model from configuration: {config}") # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: model = T5EncoderModel(config) else: model = T5ForConditionalGeneration(config) # Load weights from tf checkpoint load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}") model.save_pretrained(pytorch_dump_path) # Verify that we can load the checkpoint. model.from_pretrained(pytorch_dump_path) print("Done") if __name__ == "__main__": parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.") # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False ) args = parser.parse_args() convert_t5x_checkpoint_to_pytorch( args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/t5/modeling_flax_t5.py
# coding=utf-8 # Copyright 2021 T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Flax T5 model.""" import copy from typing import Callable, Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen import partitioning as nn_partitioning from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ...modeling_flax_outputs import ( FlaxBaseModelOutput, FlaxBaseModelOutputWithPastAndCrossAttentions, FlaxCausalLMOutputWithCrossAttentions, FlaxSeq2SeqLMOutput, FlaxSeq2SeqModelOutput, ) from ...modeling_flax_utils import ( ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, append_replace_return_docstrings, overwrite_call_docstring, ) from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_t5 import T5Config logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "google-t5/t5-small" _CONFIG_FOR_DOC = "T5Config" remat = nn_partitioning.remat # Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray: """ Shift input ids one token to the right. """ shifted_input_ids = jnp.zeros_like(input_ids) shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1]) shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id) shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) return shifted_input_ids class FlaxT5LayerNorm(nn.Module): hidden_size: int dtype: jnp.dtype = jnp.float32 eps: float = 1e-6 weight_init: Callable[..., np.ndarray] = jax.nn.initializers.ones def setup(self): self.weight = self.param("weight", self.weight_init, (self.hidden_size,)) def __call__(self, hidden_states): """ Construct a layernorm module in the T5 style; No bias and no subtraction of mean. 
""" # layer norm should always be calculated in float32 variance = jnp.power(hidden_states.astype("f4"), 2).mean(axis=-1, keepdims=True) hidden_states = hidden_states / jnp.sqrt(variance + self.eps) return self.weight * hidden_states class FlaxT5DenseActDense(nn.Module): config: T5Config dtype: jnp.dtype = jnp.float32 def setup(self): wi_init_std = self.config.initializer_factor * (self.config.d_model**-0.5) wo_init_std = self.config.initializer_factor * (self.config.d_ff**-0.5) self.wi = nn.Dense( self.config.d_ff, use_bias=False, kernel_init=jax.nn.initializers.normal(wi_init_std), dtype=self.dtype, ) self.wo = nn.Dense( self.config.d_model, use_bias=False, kernel_init=jax.nn.initializers.normal(wo_init_std), dtype=self.dtype, ) self.dropout = nn.Dropout(self.config.dropout_rate) self.act = ACT2FN[self.config.dense_act_fn] def __call__(self, hidden_states, deterministic=True): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.wo(hidden_states) return hidden_states class FlaxT5DenseGatedActDense(nn.Module): config: T5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): wi_init_std = self.config.initializer_factor * (self.config.d_model**-0.5) wo_init_std = self.config.initializer_factor * (self.config.d_ff**-0.5) self.wi_0 = nn.Dense( self.config.d_ff, use_bias=False, kernel_init=jax.nn.initializers.normal(wi_init_std), dtype=self.dtype, ) self.wi_1 = nn.Dense( self.config.d_ff, use_bias=False, kernel_init=jax.nn.initializers.normal(wi_init_std), dtype=self.dtype, ) self.wo = nn.Dense( self.config.d_model, use_bias=False, kernel_init=jax.nn.initializers.normal(wo_init_std), dtype=self.dtype, ) self.dropout = nn.Dropout(self.config.dropout_rate) self.act = ACT2FN[self.config.dense_act_fn] def __call__(self, hidden_states, deterministic): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.wo(hidden_states) return hidden_states class FlaxT5LayerFF(nn.Module): config: T5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): if self.config.is_gated_act: self.DenseReluDense = FlaxT5DenseGatedActDense(self.config, dtype=self.dtype) else: self.DenseReluDense = FlaxT5DenseActDense(self.config, dtype=self.dtype) self.layer_norm = FlaxT5LayerNorm(self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype) self.dropout = nn.Dropout(self.config.dropout_rate) def __call__(self, hidden_states, deterministic=True): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states, deterministic=deterministic) hidden_states = hidden_states + self.dropout(forwarded_states, deterministic=deterministic) return hidden_states class FlaxT5Attention(nn.Module): config: T5Config has_relative_attention_bias: bool = False causal: bool = False dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.relative_attention_num_buckets = self.config.relative_attention_num_buckets self.relative_attention_max_distance = self.config.relative_attention_max_distance self.d_model = self.config.d_model self.key_value_proj_dim = self.config.d_kv self.n_heads = self.config.num_heads self.dropout = self.config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim q_init_std = 
self.config.initializer_factor * ((self.inner_dim * self.key_value_proj_dim) ** -0.5) kv_init_std = self.config.initializer_factor * (self.inner_dim**-0.5) o_init_std = self.config.initializer_factor * (self.inner_dim**-0.5) self.q = nn.Dense( self.inner_dim, use_bias=False, kernel_init=jax.nn.initializers.normal(q_init_std), dtype=self.dtype, ) self.k = nn.Dense( self.inner_dim, use_bias=False, kernel_init=jax.nn.initializers.normal(kv_init_std), dtype=self.dtype, ) self.v = nn.Dense( self.inner_dim, use_bias=False, kernel_init=jax.nn.initializers.normal(kv_init_std), dtype=self.dtype, ) self.o = nn.Dense( self.d_model, use_bias=False, kernel_init=jax.nn.initializers.normal(o_init_std), dtype=self.dtype, ) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embed( self.relative_attention_num_buckets, self.n_heads, embedding_init=jax.nn.initializers.normal(kv_init_std), dtype=self.dtype, ) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0) * num_buckets relative_position = jnp.abs(relative_position) else: relative_position = -jnp.clip(relative_position, a_max=0) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( jnp.log(relative_position / max_exact) / jnp.log(max_distance / max_exact) * (num_buckets - max_exact) ) relative_position_if_large = jnp.clip(relative_position_if_large, a_max=num_buckets - 1) relative_buckets += jnp.where(is_small, relative_position, relative_position_if_large) return relative_buckets.astype("i4") def compute_bias(self, query_length, key_length): """Compute binned relative position bias""" context_position = jnp.arange(query_length, dtype="i4")[:, None] memory_position = jnp.arange(key_length, dtype="i4")[None, :] relative_position = memory_position - context_position relative_position_bucket = self._relative_position_bucket( relative_position, bidirectional=(not self.causal), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) values = values.transpose((2, 0, 1))[None, :, :, :] return values def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.n_heads, self.key_value_proj_dim)) def _merge_heads(self, hidden_states): return 
hidden_states.reshape(hidden_states.shape[:2] + (self.inner_dim,))

    @nn.compact
    def _concatenate_to_cache(self, key, value, query, attention_mask):
        """
        This function takes projected key, value states from a single input token and concatenates the states to cached
        states from previous steps. This function is slightly adapted from the official Flax repository:
        https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
        """
        # detect if we're initializing by absence of existing cache data.
        is_initialized = self.has_variable("cache", "cached_key")
        cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
        cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
        cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))

        if is_initialized:
            *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
            # update key, value caches with our new 1d spatial slices
            cur_index = cache_index.value
            indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
            key = jax.lax.dynamic_update_slice(cached_key.value, key, indices)
            value = jax.lax.dynamic_update_slice(cached_value.value, value, indices)
            cached_key.value = key
            cached_value.value = value
            num_updated_cache_vectors = query.shape[1]
            cache_index.value = cache_index.value + num_updated_cache_vectors
            # causal mask for cached decoder self-attention: our single query position should only attend to those key positions
            # that have already been generated and cached, not the remaining zero elements.
            pad_mask = jnp.broadcast_to(
                jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
                tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
            )
            attention_mask = combine_masks(pad_mask, attention_mask)

        return key, value, attention_mask

    def _create_position_bias(
        self, key_states, query_states, attention_mask, init_cache, seq_length, causal_attention_mask_shift
    ):
        cache_is_filled = self.causal and self.has_variable("cache", "cached_key") and (not init_cache)
        key_length = key_states.shape[1]
        query_length = key_length if cache_is_filled else query_states.shape[1]

        if self.has_relative_attention_bias:
            position_bias = self.compute_bias(query_length, key_length)
        elif attention_mask is not None:
            position_bias = jnp.zeros_like(attention_mask)
        else:
            position_bias = jnp.zeros((1, self.n_heads, query_length, key_length), dtype=self.dtype)

        # if key and values are already calculated, only the last query position bias should be taken
        if cache_is_filled:
            max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
            position_bias = jax.lax.dynamic_slice(
                position_bias,
                (0, 0, causal_attention_mask_shift, 0),
                (1, self.n_heads, seq_length, max_decoder_length),
            )
        return position_bias

    def __call__(
        self,
        hidden_states,
        attention_mask=None,
        key_value_states=None,
        position_bias=None,
        use_cache=False,
        output_attentions=False,
        deterministic=True,
        init_cache=False,
    ):
        """
        Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
""" batch_size, seq_length = hidden_states.shape[:2] # q, k, v projections query_states = self.q(hidden_states) # (batch_size, n_heads, seq_length, dim_per_head) key_states = self.k(hidden_states) if key_value_states is None else self.k(key_value_states) value_states = self.v(hidden_states) if key_value_states is None else self.v(key_value_states) # reshape to (batch_size, seq_length, n_heads, head_dim) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) # counter-act scaling in dot_product_attention_weights function query_states *= jnp.sqrt(query_states.shape[-1]) # for fast decoding causal attention mask should be shifted causal_attention_mask_shift = ( self.variables["cache"]["cache_index"] if (self.has_variable("cache", "cached_key") and self.causal) else 0 ) # create causal attention_mask; attention_mask has to be defined when model is causal if self.causal: causal_attention_mask = make_causal_mask(attention_mask, dtype="bool") # fast decoding for generate requires special attention_mask if self.has_variable("cache", "cached_key"): max_decoder_length = self.variables["cache"]["cached_key"].shape[1] causal_attention_mask = jax.lax.dynamic_slice( causal_attention_mask, (0, 0, causal_attention_mask_shift, 0), (1, 1, seq_length, max_decoder_length), ) # broadcast causal attention mask & attention mask to fit for merge causal_attention_mask = jnp.broadcast_to( causal_attention_mask, (batch_size,) + causal_attention_mask.shape[1:] ) attention_mask = jnp.broadcast_to( jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_attention_mask.shape ) attention_mask = combine_masks(attention_mask, causal_attention_mask) elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. 
        if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
            key_states, value_states, attention_mask = self._concatenate_to_cache(
                key_states, value_states, query_states, attention_mask
            )

        # replace masked positions with the smallest value representable in the computation dtype
        if attention_mask is not None:
            mask_value = jnp.finfo(self.dtype).min
            attention_mask = jax.lax.select(
                attention_mask > 0,
                jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
                jnp.full(attention_mask.shape, mask_value).astype(self.dtype),
            )

        if position_bias is None:
            # compute position bias (only for first layer)
            position_bias = self._create_position_bias(
                key_states, query_states, attention_mask, init_cache, seq_length, causal_attention_mask_shift
            )

            if attention_mask is not None:
                position_bias = position_bias + attention_mask

        # create dropout rng
        dropout_rng = None
        if not deterministic and self.dropout > 0.0:
            dropout_rng = self.make_rng("dropout")

        # Softmax(QK^T)
        attn_weights = dot_product_attention_weights(
            query_states,
            key_states,
            bias=position_bias,
            dropout_rng=dropout_rng,
            dropout_rate=self.dropout,
            broadcast_dropout=True,
            deterministic=deterministic,
            dtype=self.dtype,
        )

        # multiply with value states
        attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)

        # bring back to (batch_size, seq_length, d_model)
        attn_output = self._merge_heads(attn_output)

        # apply output matrix
        attn_output = self.o(attn_output)

        outputs = (attn_output, position_bias)

        if output_attentions:
            outputs = outputs + (attn_weights,)

        return outputs


class FlaxT5LayerSelfAttention(nn.Module):
    config: T5Config
    has_relative_attention_bias: bool = False
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.SelfAttention = FlaxT5Attention(
            self.config,
            has_relative_attention_bias=self.has_relative_attention_bias,
            causal=self.config.causal,
            dtype=self.dtype,
        )
        self.layer_norm = FlaxT5LayerNorm(self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype)
        self.dropout = nn.Dropout(self.config.dropout_rate)

    def __call__(
        self,
        hidden_states,
        attention_mask=None,
        position_bias=None,
        output_attentions=False,
        deterministic=True,
        init_cache=False,
    ):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.SelfAttention(
            normed_hidden_states,
            attention_mask=attention_mask,
            position_bias=position_bias,
            output_attentions=output_attentions,
            deterministic=deterministic,
            init_cache=init_cache,
        )
        hidden_states = hidden_states + self.dropout(attention_output[0], deterministic=deterministic)
        outputs = (hidden_states,) + attention_output[1:]  # add attentions if we output them
        return outputs


class FlaxT5LayerCrossAttention(nn.Module):
    config: T5Config
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.EncDecAttention = FlaxT5Attention(
            self.config, has_relative_attention_bias=False, causal=False, dtype=self.dtype
        )
        self.layer_norm = FlaxT5LayerNorm(self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype)
        self.dropout = nn.Dropout(self.config.dropout_rate)

    def __call__(
        self,
        hidden_states,
        key_value_states,
        attention_mask=None,
        position_bias=None,
        output_attentions=False,
        deterministic=True,
    ):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.EncDecAttention(
            normed_hidden_states,
            attention_mask=attention_mask,
            key_value_states=key_value_states,
            position_bias=position_bias,
            output_attentions=output_attentions,
        )
        hidden_states = hidden_states + self.dropout(attention_output[0], deterministic=deterministic)
        outputs = (hidden_states,) + 
attention_output[1:] # add attentions if we output them return outputs class FlaxT5Block(nn.Module): config: T5Config has_relative_attention_bias: bool = False dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.causal = self.config.causal self.layer = ( FlaxT5LayerSelfAttention( self.config, has_relative_attention_bias=self.has_relative_attention_bias, name=str(0), dtype=self.dtype, ), ) feed_forward_index = 1 if self.causal: self.layer += (FlaxT5LayerCrossAttention(self.config, name=str(1), dtype=self.dtype),) feed_forward_index += 1 self.layer += (FlaxT5LayerFF(self.config, name=str(feed_forward_index), dtype=self.dtype),) def __call__( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, output_attentions=False, return_dict=True, deterministic=True, init_cache=False, ): self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, output_attentions=output_attentions, deterministic=deterministic, init_cache=init_cache, ) hidden_states = self_attention_outputs[0] attention_outputs = self_attention_outputs[1:] # Keep self-attention outputs and relative position weights do_cross_attention = self.causal and encoder_hidden_states is not None if do_cross_attention: cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, output_attentions=output_attentions, deterministic=deterministic, ) hidden_states = cross_attention_outputs[0] # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[1:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states, deterministic=deterministic) outputs = (hidden_states,) outputs = outputs + attention_outputs # returns hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) return outputs class FlaxT5LayerCollection(nn.Module): config: T5Config has_relative_attention_bias: bool dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.layer = FlaxT5Block( self.config, has_relative_attention_bias=self.has_relative_attention_bias, dtype=self.dtype ) def __call__( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, output_attentions=False, deterministic=True, init_cache=False, ): return self.layer( hidden_states, attention_mask=attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, output_attentions=output_attentions, deterministic=deterministic, init_cache=init_cache, ) class FlaxT5BlockCollection(nn.Module): config: T5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): self.causal = self.config.causal if self.gradient_checkpointing: FlaxT5CheckpointLayer = remat(FlaxT5LayerCollection, static_argnums=(6, 7, 8)) self.blocks = [ FlaxT5CheckpointLayer( self.config, has_relative_attention_bias=(i == 0), dtype=self.dtype, name=str(i), ) for i in range(self.config.num_layers) ] else: self.blocks = [ FlaxT5LayerCollection( self.config, 
has_relative_attention_bias=(i == 0), dtype=self.dtype, name=str(i), ) for i in range(self.config.num_layers) ] def __call__( self, hidden_states=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions: bool = False, output_hidden_states: bool = False, deterministic: bool = True, init_cache: bool = False, ): # Prepare head mask if needed all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions and self.causal) else None position_bias = None encoder_decoder_position_bias = None for i, layer_module in enumerate(self.blocks): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, position_bias, encoder_hidden_states, encoder_attention_mask, encoder_decoder_position_bias, output_attentions, deterministic, init_cache, ) hidden_states = layer_outputs[0] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[1] if self.causal and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[3 if output_attentions else 2] if output_attentions: all_attentions = all_attentions + (layer_outputs[2],) if self.causal: all_cross_attentions = all_cross_attentions + (layer_outputs[4],) return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) class FlaxT5Stack(nn.Module): config: T5Config embed_tokens: nn.Embed dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): self.causal = self.config.causal self.block = FlaxT5BlockCollection( self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing ) self.final_layer_norm = FlaxT5LayerNorm( self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype ) self.dropout = nn.Dropout(self.config.dropout_rate) def __call__( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, init_cache: bool = False, ): hidden_states = self.embed_tokens(input_ids) hidden_states = self.dropout(hidden_states, deterministic=deterministic) outputs = self.block( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, deterministic=deterministic, init_cache=init_cache, ) hidden_states = outputs[0] hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) # Add last layer all_hidden_states = None if output_hidden_states: all_hidden_states = outputs.hidden_states all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: if output_hidden_states: return ( hidden_states, all_hidden_states, ) + outputs[2:] return (hidden_states,) + outputs[1:] return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=outputs.attentions, 
            cross_attentions=outputs.cross_attentions,
        )


T5_ENCODE_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
            should be able to pad the inputs on both the right and the left.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            To know more on how to prepare `input_ids` for pretraining take a look at [T5 Training](./t5#training).
        attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

T5_DECODE_INPUTS_DOCSTRING = r"""
    Args:
        decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            For training, `decoder_input_ids` should be provided.
        encoder_outputs (`tuple(tuple(jnp.ndarray))`):
            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
        encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.

            If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the
            paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
        past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
            Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
            auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

T5_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
            should be able to pad the inputs on both the right and the left.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)

            To know more on how to prepare `input_ids` for pretraining take a look at [T5 Training](./t5#training).
        attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
            is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).

            To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5
            Training](./t5#training).
        decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        encoder_outputs (`tuple(tuple(jnp.ndarray))`, *optional*):
            Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*)
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at
            the output of the last layer of the encoder. Used in the cross-attention of the decoder.
        past_key_values (`tuple(tuple(jnp.ndarray))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


class FlaxT5PreTrainedModel(FlaxPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
""" config_class = T5Config base_model_prefix = "transformer" module_class: nn.Module = None def __init__( self, config: T5Config, input_shape: Tuple[int] = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, gradient_checkpointing: bool = False, **kwargs, ): module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def enable_gradient_checkpointing(self): self._module = self.module_class( config=self.config, dtype=self.dtype, gradient_checkpointing=True, ) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") attention_mask = jnp.ones_like(input_ids) args = [input_ids, attention_mask] if self.module_class not in [FlaxT5EncoderModule]: decoder_input_ids = jnp.ones_like(input_ids) decoder_attention_mask = jnp.ones_like(input_ids) args.extend([decoder_input_ids, decoder_attention_mask]) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init( rngs, *args, )["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING) def __call__( self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, decoder_input_ids: jnp.ndarray = None, decoder_attention_mask: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict if decoder_input_ids is None: raise ValueError( "Make sure to provide both `input_ids` and `decoder_input_ids`. `decoder_input_ids` is not passed" " here." ) # prepare encoder inputs if attention_mask is None: attention_mask = jnp.ones_like(input_ids) # prepare decoder inputs if decoder_attention_mask is None: decoder_attention_mask = jnp.ones_like(decoder_input_ids) # Handle any PRNG if needed rngs = {"dropout": dropout_rng} if dropout_rng is not None else {} return self.module.apply( {"params": params or self.params}, input_ids=jnp.array(input_ids, dtype="i4"), attention_mask=jnp.array(attention_mask, dtype="i4"), decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, ) def init_cache(self, batch_size, max_length, encoder_outputs): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. 
encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`): `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. """ # init input variables to retrieve cache decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4") decoder_attention_mask = jnp.ones_like(decoder_input_ids) def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs): decoder_module = module._get_decoder_module() return decoder_module( decoder_input_ids, decoder_attention_mask, **kwargs, ) init_variables = self.module.init( jax.random.PRNGKey(0), decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], init_cache=True, method=_decoder_forward, # we only need to call the decoder to init the cache ) return unfreeze(init_variables["cache"]) @add_start_docstrings(T5_ENCODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=T5Config) def encode( self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, FlaxT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small") >>> model = FlaxT5ForConditionalGeneration.from_pretrained("google-t5/t5-small") >>> text = "My friends are cool but they eat too many carbs." 
>>> inputs = tokenizer(text, return_tensors="np") >>> encoder_outputs = model.encode(**inputs) ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict if attention_mask is None: attention_mask = jnp.ones_like(input_ids) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng def _encoder_forward(module, input_ids, attention_mask, **kwargs): encode_module = module._get_encoder_module() return encode_module(input_ids, attention_mask, **kwargs) return self.module.apply( {"params": params or self.params}, input_ids=jnp.array(input_ids, dtype="i4"), attention_mask=jnp.array(attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward, ) @add_start_docstrings(T5_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=T5Config) def decode( self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, past_key_values: dict = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, FlaxT5ForConditionalGeneration >>> import jax.numpy as jnp >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small") >>> model = FlaxT5ForConditionalGeneration.from_pretrained("google-t5/t5-small") >>> text = "My friends are cool but they eat too many carbs." >>> inputs = tokenizer(text, return_tensors="np") >>> encoder_outputs = model.encode(**inputs) >>> decoder_start_token_id = model.config.decoder_start_token_id >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> logits = outputs.logits ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: batch_size, sequence_length = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) batch_size, sequence_length = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be # passed down to ensure cache is used. 
It has to be made sure that cache is marked as mutable so that # it can be changed by FlaxT5Attention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs): decoder_module = module._get_decoder_module() return decoder_module( decoder_input_ids, decoder_attention_mask, **kwargs, ) outputs = self.module.apply( inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward, ) # add updated cache to model output if past_key_values is not None and return_dict: outputs, past = outputs outputs["past_key_values"] = unfreeze(past["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs, past = outputs outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] return outputs T5_START_DOCSTRING = r""" The T5 model was proposed in [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder decoder transformer pre-trained in a text-to-text denoising generative setting. This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a Flax Linen [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`T5Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. 
""" @add_start_docstrings( "The bare T5 Model transformer outputting raw hidden-stateswithout any specific head on top.", T5_START_DOCSTRING, ) class FlaxT5Module(nn.Module): config: T5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def _get_encoder_module(self): return self.encoder def _get_decoder_module(self): return self.decoder def setup(self): self.shared = nn.Embed( self.config.vocab_size, self.config.d_model, embedding_init=jax.nn.initializers.normal(self.config.initializer_factor * 1.0), dtype=self.dtype, ) encoder_config = copy.deepcopy(self.config) encoder_config.causal = False self.encoder = FlaxT5Stack( encoder_config, embed_tokens=self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) decoder_config = copy.deepcopy(self.config) decoder_config.causal = True decoder_config.num_layers = self.config.num_decoder_layers self.decoder = FlaxT5Stack( decoder_config, embed_tokens=self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) def __call__( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs=None, output_attentions=None, output_hidden_states=None, return_dict=None, deterministic: bool = True, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Encode if needed (training, first prediction pass) encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) if not return_dict: return decoder_outputs + encoder_outputs return FlaxSeq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) class FlaxT5Model(FlaxT5PreTrainedModel): module_class = FlaxT5Module append_call_sample_docstring(FlaxT5Model, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC) FLAX_T5_MODEL_DOCSTRING = """ Returns: Example: ```python >>> from transformers import AutoTokenizer, FlaxT5Model >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small") >>> model = FlaxT5Model.from_pretrained("google-t5/t5-small") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="np" ... ).input_ids >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="np").input_ids >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for T5Model. >>> # This is not needed for torch's T5ForConditionalGeneration as it does this internally using labels arg. 
>>> decoder_input_ids = model._shift_right(decoder_input_ids) >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ``` """ overwrite_call_docstring(FlaxT5Model, T5_INPUTS_DOCSTRING + FLAX_T5_MODEL_DOCSTRING) append_replace_return_docstrings(FlaxT5Model, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_start_docstrings( "The bare T5 Model transformer outputting encoder's raw hidden-states without any specific head on top.", T5_START_DOCSTRING, ) class FlaxT5EncoderModule(nn.Module): config: T5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): self.shared = nn.Embed( self.config.vocab_size, self.config.d_model, embedding_init=jax.nn.initializers.normal(self.config.initializer_factor * 1.0), dtype=self.dtype, ) encoder_config = copy.deepcopy(self.config) encoder_config.is_decoder = False encoder_config.is_encoder_decoder = False encoder_config.causal = False self.encoder = FlaxT5Stack( encoder_config, embed_tokens=self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) def __call__( self, input_ids=None, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict: bool = True, deterministic: bool = True, ): # Encode if needed (training, first prediction pass) encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) return encoder_outputs class FlaxT5EncoderModel(FlaxT5PreTrainedModel): module_class = FlaxT5EncoderModule @add_start_docstrings_to_model_forward(T5_ENCODE_INPUTS_DOCSTRING) def __call__( self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # prepare encoder inputs if attention_mask is None: attention_mask = jnp.ones_like(input_ids) # Handle any PRNG if needed rngs = {"dropout": dropout_rng} if dropout_rng is not None else {} return self.module.apply( {"params": params or self.params}, input_ids=jnp.array(input_ids, dtype="i4"), attention_mask=jnp.array(attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, ) @add_start_docstrings("""T5 Model with a `language modeling` head on top.""", T5_START_DOCSTRING) class FlaxT5ForConditionalGenerationModule(nn.Module): config: T5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def _get_encoder_module(self): return self.encoder def _get_decoder_module(self): return self.decoder def setup(self): self.model_dim = self.config.d_model self.shared = nn.Embed( self.config.vocab_size, self.config.d_model, embedding_init=jax.nn.initializers.normal(self.config.initializer_factor), dtype=self.dtype, ) encoder_config = copy.deepcopy(self.config) encoder_config.causal = 
False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = FlaxT5Stack( encoder_config, self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing ) decoder_config = copy.deepcopy(self.config) decoder_config.causal = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = self.config.num_decoder_layers self.decoder = FlaxT5Stack( decoder_config, self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing ) self.lm_head = nn.Dense( self.config.vocab_size, use_bias=False, kernel_init=jax.nn.initializers.normal(self.config.initializer_factor), dtype=self.dtype, ) def __call__( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs=None, output_attentions=None, output_hidden_states=None, return_dict=None, deterministic: bool = True, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Encode encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) hidden_states = encoder_outputs[0] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) if self.config.tie_word_embeddings: shared_embedding = self.shared.variables["params"]["embedding"] lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, sequence_output) else: lm_logits = self.lm_head(sequence_output) if not return_dict: return (lm_logits,) + decoder_outputs[1:] + encoder_outputs return FlaxSeq2SeqLMOutput( logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) class FlaxT5ForConditionalGeneration(FlaxT5PreTrainedModel): module_class = FlaxT5ForConditionalGenerationModule @add_start_docstrings(T5_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=T5Config) def decode( self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, past_key_values: dict = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, FlaxT5ForConditionalGeneration >>> import jax.numpy as jnp >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small") >>> model = FlaxT5ForConditionalGeneration.from_pretrained("google-t5/t5-small") >>> 
text = "summarize: My friends are cool but they eat too many carbs." >>> inputs = tokenizer(text, return_tensors="np") >>> encoder_outputs = model.encode(**inputs) >>> decoder_start_token_id = model.config.decoder_start_token_id >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> logits = outputs.logits ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: batch_size, sequence_length = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) batch_size, sequence_length = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that # it can be changed by FlaxT5Attention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs): decoder_module = module._get_decoder_module() decoder_outputs = decoder_module( decoder_input_ids, decoder_attention_mask, **kwargs, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.config.d_model**-0.5) if self.config.tie_word_embeddings: shared_embedding = module.shared.variables["params"]["embedding"] lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, sequence_output) else: lm_logits = module.lm_head(sequence_output) return lm_logits, decoder_outputs outputs = self.module.apply( inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward, ) if past_key_values is None: lm_logits, decoder_outputs = outputs else: (lm_logits, decoder_outputs), past = outputs if return_dict: outputs = FlaxCausalLMOutputWithCrossAttentions( logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, ) else: outputs = (lm_logits,) + decoder_outputs[1:] # add updated cache to model output if past_key_values is not None and return_dict: outputs["past_key_values"] = unfreeze(past["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] return outputs def 
prepare_inputs_for_generation( self, decoder_input_ids, max_length, attention_mask: Optional[jax.Array] = None, decoder_attention_mask: Optional[jax.Array] = None, encoder_outputs=None, **kwargs, ): # initializing the cache batch_size, seq_length = decoder_input_ids.shape past_key_values = self.init_cache(batch_size, max_length, encoder_outputs) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since the decoder uses a causal mask, those positions are masked anyways. # Thus we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if decoder_attention_mask is not None: extended_attention_mask = jax.lax.dynamic_update_slice( extended_attention_mask, decoder_attention_mask, (0, 0) ) return { "past_key_values": past_key_values, "encoder_outputs": encoder_outputs, "encoder_attention_mask": attention_mask, "decoder_attention_mask": extended_attention_mask, } def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values return model_kwargs FLAX_T5_CONDITIONAL_GENERATION_DOCSTRING = """ Returns: Example: ```python >>> from transformers import AutoTokenizer, FlaxT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small") >>> model = FlaxT5ForConditionalGeneration.from_pretrained("google-t5/t5-small") >>> ARTICLE_TO_SUMMARIZE = "summarize: My friends are cool but they eat too many carbs." >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], return_tensors="np") >>> # Generate Summary >>> summary_ids = model.generate(inputs["input_ids"]).sequences >>> print(tokenizer.decode(summary_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)) ``` """ overwrite_call_docstring( FlaxT5ForConditionalGeneration, T5_INPUTS_DOCSTRING + FLAX_T5_CONDITIONAL_GENERATION_DOCSTRING ) append_replace_return_docstrings( FlaxT5ForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC )
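# ---------------------------------------------------------------------------------------------
# A minimal greedy-decoding sketch built on the `encode`/`decode`/`init_cache` methods defined
# above. This block is illustrative only and not part of the original file: `model.generate` is
# the supported API, and the "google-t5/t5-small" checkpoint simply reuses the docstring examples.
# ---------------------------------------------------------------------------------------------
import jax.numpy as jnp

from transformers import AutoTokenizer, FlaxT5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
model = FlaxT5ForConditionalGeneration.from_pretrained("google-t5/t5-small")

inputs = tokenizer("summarize: My friends are cool but they eat too many carbs.", return_tensors="np")
encoder_outputs = model.encode(**inputs)

batch_size = inputs.input_ids.shape[0]
max_length = 16

# the cache is initialized once for the full target length; a single static decoder attention
# mask suffices because the causal mask hides the not-yet-generated positions anyway
past_key_values = model.init_cache(batch_size, max_length, encoder_outputs)
decoder_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
decoder_input_ids = jnp.full((batch_size, 1), model.config.decoder_start_token_id, dtype="i4")

tokens = []
for _ in range(max_length):
    outputs = model.decode(
        decoder_input_ids,
        encoder_outputs,
        decoder_attention_mask=decoder_attention_mask,
        past_key_values=past_key_values,
    )
    # the updated cache comes back on the output, so only the newest token is fed in next step
    past_key_values = outputs.past_key_values
    decoder_input_ids = jnp.argmax(outputs.logits[:, -1:, :], axis=-1).astype("i4")
    tokens.append(decoder_input_ids)

# note: for brevity this loop does not stop early at `eos_token_id`
print(tokenizer.decode(jnp.concatenate(tokens, axis=-1)[0].tolist(), skip_special_tokens=True))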
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/t5/tokenization_t5.py
# coding=utf-8
# Copyright 2018 T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization class for model T5."""


import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...convert_slow_tokenizer import import_protobuf
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken


if TYPE_CHECKING:
    from ...tokenization_utils_base import TextInput

from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}


# TODO(PVP) - this should be removed in Transformers v5
SPIECE_UNDERLINE = "▁"


class T5Tokenizer(PreTrainedTokenizer):
    """
    Construct a T5 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.

            </Tip>

        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        extra_ids (`int`, *optional*, defaults to 100):
            Add a number of extra IDs to the vocabulary for use as sentinels. These tokens are accessible as
            "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. These tokens can be retrieved by
            calling the `get_sentinel_tokens` method, and their token IDs by calling the `get_sentinel_token_ids`
            method.
        additional_special_tokens (`List[str]`, *optional*):
            Additional special tokens used by the tokenizer.
        sp_model_kwargs (`dict`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
            to set:

            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
                using forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.
legacy (`bool`, *optional*): Whether or not the `legacy` behaviour of the tokenizer should be used. Legacy is before the merge of #24622 and #25224 which includes fixes to properly handle tokens that appear after special tokens. A simple example: - `legacy=True`: ```python >>> from transformers import T5Tokenizer >>> tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-base", legacy=True) >>> tokenizer.encode("Hello <extra_id_0>.") [8774, 32099, 3, 5, 1] ``` - `legacy=False`: ```python >>> from transformers import T5Tokenizer >>> tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-base", legacy=False) >>> tokenizer.encode("Hello <extra_id_0>.") # the extra space `[3]` is no longer here [8774, 32099, 5, 1] ``` Checkout the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. Attributes: sp_model (`SentencePieceProcessor`): The *SentencePiece* processor that is used for every conversion (string, tokens and IDs). """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100, additional_special_tokens=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, legacy=None, add_prefix_space=True, **kwargs, ) -> None: pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.vocab_file = vocab_file self._extra_ids = extra_ids self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) if additional_special_tokens is not None: extra_tokens = [x for x in additional_special_tokens if "<extra_id_" in str(x)] if len(extra_tokens) < 1: additional_special_tokens += [f"<extra_id_{i}>" for i in range(extra_ids)] elif extra_ids > 0 and extra_ids != len(extra_tokens): raise ValueError( f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids" " tokens" ) else: extra_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)] additional_special_tokens = extra_tokens # for legacy purpose, we keep this. Will be removed and tests updated. (when `added_tokens_decoder` is not passed as kwargs) self._added_tokens_decoder = {} for i in range(len(extra_tokens)): self._added_tokens_decoder[len(self.sp_model) - 1 + extra_ids - i] = AddedToken( f"<extra_id_{i}>", single_word=False, lstrip=True, rstrip=True, special=True, normalized=False ) if legacy is None: logger.warning_once( f"You are using the default legacy behaviour of the {self.__class__}. This is" " expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you." " If you want to use the new behaviour, set `legacy=False`. 
This should only be set if you understand what it" " means, and thoroughly read the reason why this was added as explained in" " https://github.com/huggingface/transformers/pull/24565" ) legacy = True self.legacy = legacy self.sp_model = self.get_spm_processor(kwargs.pop("from_slow", False)) self.vocab_file = vocab_file self._extra_ids = extra_ids self.add_prefix_space = add_prefix_space super().__init__( eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, legacy=legacy, add_prefix_space=add_prefix_space, **kwargs, ) # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_spm_processor def get_spm_processor(self, from_slow=False): tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs) if self.legacy or from_slow: # no dependency on protobuf tokenizer.Load(self.vocab_file) return tokenizer with open(self.vocab_file, "rb") as f: sp_model = f.read() model_pb2 = import_protobuf(f"The new behaviour of {self.__class__.__name__} (with `self.legacy = False`)") model = model_pb2.ModelProto.FromString(sp_model) normalizer_spec = model_pb2.NormalizerSpec() normalizer_spec.add_dummy_prefix = False model.normalizer_spec.MergeFrom(normalizer_spec) sp_model = model.SerializeToString() tokenizer.LoadFromSerializedProto(sp_model) return tokenizer @staticmethod def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length): if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes: deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( "This tokenizer was incorrectly instantiated with a model max length of" f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this" " behavior is kept to avoid breaking backwards compatibility when padding/encoding with" " `truncation is True`.\n- Be aware that you SHOULD NOT rely on" f" {pretrained_model_name_or_path} automatically truncating your input to" f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences" f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with" " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please" " instantiate this tokenizer with `model_max_length` set to your preferred value.", FutureWarning, ) return max_model_length @property def vocab_size(self): return self.sp_model.get_piece_size() def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. 
Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) # normal case: some special tokens if token_ids_1 is None: return ([0] * len(token_ids_0)) + [1] return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] def get_sentinel_tokens(self): return list( set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens)) ) def get_sentinel_token_ids(self): return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()] def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]: """Do not add eos again if user already added it.""" if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated" " eos tokens being added." ) return token_ids else: return token_ids + [self.eos_token_id] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ eos = [self.eos_token_id] if token_ids_1 is None: return len(token_ids_0 + eos) * [0] return len(token_ids_0 + eos + token_ids_1 + eos) * [0] def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A sequence has the following format: - single sequence: `X </s>` - pair of sequences: `A </s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ token_ids_0 = self._add_eos_if_not_present(token_ids_0) if token_ids_1 is None: return token_ids_0 else: token_ids_1 = self._add_eos_if_not_present(token_ids_1) return token_ids_0 + token_ids_1 def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d): self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def tokenize(self, text: "TextInput", **kwargs) -> List[str]: """ Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the first token is special. 
""" if self.legacy or len(text) == 0: return super().tokenize(text, **kwargs) text = text.replace(SPIECE_UNDERLINE, " ") if self.add_prefix_space: text = SPIECE_UNDERLINE + text tokens = super().tokenize(text, **kwargs) if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens: tokens = tokens[1:] return tokens @property def unk_token_length(self): return len(self.sp_model.encode(str(self.unk_token))) def _tokenize(self, text, **kwargs): """ Returns a tokenized string. We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give `['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`. `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`. """ tokens = self.sp_model.encode(text, out_type=str) if self.legacy or not text.startswith((SPIECE_UNDERLINE, " ")): return tokens # 1. Encode string + prefix ex: "<unk> Hey" tokens = self.sp_model.encode(self.unk_token + text, out_type=str) # 2. Remove self.unk_token from ['<','unk','>', '▁Hey'] return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.sp_model.piece_to_id(token) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" token = self.sp_model.IdToPiece(index) return token def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" # since we manually add the prefix space, we have to remove it when decoding if tokens[0].startswith(SPIECE_UNDERLINE) and self.add_prefix_space: tokens[0] = tokens[0][1:] current_sub_tokens = [] out_string = "" prev_is_special = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(current_sub_tokens) + token prev_is_special = True current_sub_tokens = [] else: current_sub_tokens.append(token) prev_is_special = False out_string += self.sp_model.decode(current_sub_tokens) return out_string.strip() def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,)
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/t5/convert_t5_original_tf_checkpoint_to_pytorch.py
# coding=utf-8
# Copyright 2018 The T5 authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert T5 checkpoint."""


import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
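# Example invocation of this script (the paths are placeholders, shown only for illustration):
#
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/tf_checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model
#
# The three required flags map one-to-one onto the argparse arguments defined above.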
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/trocr/modeling_trocr.py
# coding=utf-8 # Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch TrOCR decoder model (based on RoBERTa).""" import copy import math from typing import Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, logging, replace_return_docstrings from .configuration_trocr import TrOCRConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "TrOCRConfig" _CHECKPOINT_FOR_DOC = "microsoft/trocr-base-handwritten" from ..deprecated._archive_maps import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 # Copied from transformers.models.bart.modeling_bart.BartLearnedPositionalEmbedding with Bart->TrOCR class TrOCRLearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int): # TrOCR is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. Other models don't have this hack self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim) def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0): """`input_ids' shape is expected to be [bsz x seqlen].""" bsz, seq_len = input_ids.shape[:2] positions = torch.arange( past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device ).expand(bsz, -1) return super().forward(positions + self.offset) # Copied from transformers.models.bart.modeling_bart.BartScaledWordEmbedding with Bart->TrOCR class TrOCRScaledWordEmbedding(nn.Embedding): """ This module overrides nn.Embeddings' forward by multiplying with embeddings scale. """ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float] = 1.0): super().__init__(num_embeddings, embedding_dim, padding_idx) self.embed_scale = embed_scale def forward(self, input_ids: torch.Tensor): return super().forward(input_ids) * self.embed_scale class TrOCRSinusoidalPositionalEmbedding(nn.Module): """This module produces sinusoidal positional embeddings of any length.""" def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None): super().__init__() self.offset = 2 self.embedding_dim = embedding_dim self.padding_idx = padding_idx self.weights = self.get_embedding(num_positions, embedding_dim, padding_idx) self.register_buffer("_float_tensor", torch.FloatTensor(1)) @staticmethod def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None): """ Build sinusoidal embeddings. 
This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb.to(torch.get_default_dtype()) @torch.no_grad() def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0): bsz, seq_len = input_ids.size() # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to( input_ids.device ) # expand embeddings if needed max_pos = self.padding_idx + 1 + seq_len if self.weights is None or max_pos > self.weights.size(0): # recompute/expand embeddings if needed self.weights = self.get_embedding(max_pos, self.embedding_dim, self.padding_idx) self.weights = self.weights.to(self._float_tensor) x = self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach() return x def create_position_ids_from_input_ids( self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int] = 0 ): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx class TrOCRAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper.""" def __init__( self, config, embed_dim: int, num_heads: int, kdim: int = None, vdim: int = None, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_cross_attention: bool = False, ): super().__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if not (self.head_dim * num_heads == self.embed_dim): raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias) self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value class TrOCRDecoderLayer(nn.Module): def __init__(self, config: TrOCRConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = TrOCRAttention( config, embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) if config.is_decoder: self.encoder_attn = TrOCRAttention( config, embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, kdim=config.cross_attention_hidden_size, vdim=config.cross_attention_hidden_size, dropout=config.attention_dropout, is_decoder=True, is_cross_attention=True, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, 
config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, cross_attn_layer_head_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size *(decoder_attention_heads,)*. past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) 
hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs class TrOCRPreTrainedModel(PreTrainedModel): config_class = TrOCRConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["TrOCRDecoderLayer"] def _init_weights(self, module): std = self.config.init_std if isinstance(module, (nn.Linear, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() TROCR_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`TrOCRConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ class TrOCRDecoder(TrOCRPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TrOCRDecoderLayer`] Args: config: TrOCRConfig """ def __init__(self, config: TrOCRConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0 self.embed_tokens = TrOCRScaledWordEmbedding( config.vocab_size, config.hidden_size, self.padding_idx, embed_scale=embed_scale ) if config.use_learned_position_embeddings: self.embed_positions = TrOCRLearnedPositionalEmbedding(config.max_position_embeddings, config.hidden_size) else: self.embed_positions = TrOCRSinusoidalPositionalEmbedding( config.max_position_embeddings + self.padding_idx + 1, config.hidden_size, self.padding_idx, ) if config.layernorm_embedding: self.layernorm_embedding = nn.LayerNorm(config.hidden_size) else: self.layernorm_embedding = None self.layers = nn.ModuleList([TrOCRDecoderLayer(config) for _ in range(config.decoder_layers)]) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. 
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input = input_ids input_ids = input_ids.view(-1, input.shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] input = inputs_embeds[:, :, -1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if self.config.use_learned_position_embeddings: embed_pos = self.embed_positions(input, past_key_values_length=past_key_values_length) else: embed_pos = self.embed_positions(input_ids, past_key_values_length=past_key_values_length) hidden_states = inputs_embeds + embed_pos if self.layernorm_embedding is not None: hidden_states = self.layernorm_embedding(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) input_shape = input.shape attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache = False`..." ) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != (len(self.layers)): raise ValueError( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." 
) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, None, output_attentions, use_cache, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=( cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None ), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( "The TrOCR Model with a language modeling head. Can be used for summarization.", TROCR_START_DOCSTRING, ) class TrOCRDecoderWrapper(TrOCRPreTrainedModel): """ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the [`EncoderDecoderModel`] framework. """ def __init__(self, config): super().__init__(config) self.decoder = TrOCRDecoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) @add_start_docstrings( "The TrOCR Decoder with a language modeling head. 
Can be used as the decoder part of [`EncoderDecoderModel`] and" " [`VisionEncoderDecoder`].", TROCR_START_DOCSTRING, ) class TrOCRForCausalLM(TrOCRPreTrainedModel): _tied_weights_keys = ["output_projection.weight"] def __init__(self, config): config = copy.deepcopy(config) config.is_decoder = True config.is_encoder_decoder = False super().__init__(config) self.model = TrOCRDecoderWrapper(config) self.output_projection = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_output_embeddings(self): return self.output_projection def set_output_embeddings(self, new_embeddings): self.output_projection = new_embeddings def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. Returns: Example: ```python >>> from transformers import ( ... TrOCRConfig, ... TrOCRProcessor, ... TrOCRForCausalLM, ... ViTConfig, ... ViTModel, ... VisionEncoderDecoderModel, ... ) >>> import requests >>> from PIL import Image >>> # TrOCR is a decoder model and should be used within a VisionEncoderDecoderModel >>> # init vision2text model with random weights >>> encoder = ViTModel(ViTConfig()) >>> decoder = TrOCRForCausalLM(TrOCRConfig()) >>> model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder) >>> # If you want to start from the pretrained model, load the checkpoint with `VisionEncoderDecoderModel` >>> processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten") >>> model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten") >>> # load image from the IAM dataset >>> url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" >>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB") >>> pixel_values = processor(image, return_tensors="pt").pixel_values >>> text = "industry, ' Mr. Brown commented icily. 
' Let us have a" >>> # training >>> model.config.decoder_start_token_id = processor.tokenizer.eos_token_id >>> model.config.pad_token_id = processor.tokenizer.pad_token_id >>> model.config.vocab_size = model.config.decoder.vocab_size >>> labels = processor.tokenizer(text, return_tensors="pt").input_ids >>> outputs = model(pixel_values, labels=labels) >>> loss = outputs.loss >>> round(loss.item(), 2) 5.30 >>> # inference >>> generated_ids = model.generate(pixel_values) >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> generated_text 'industry, " Mr. Brown commented icily. " Let us have a' ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model.decoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = self.output_projection(outputs[0]) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs ): # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) if past_key_values: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] # first step, decoder_cached_states are empty return { "input_ids": input_ids, # encoder_outputs is defined. input_ids not needed "attention_mask": attention_mask, "past_key_values": past_key_values, "use_cache": use_cache, } @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past
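
# --- Self-contained sketch (not part of the original file) ---
# `_reorder_cache` above gathers every cached key/value tensor along the beam
# dimension during beam search. The toy shapes below are placeholders:
# (num_beams, num_heads, seq_len, head_dim) for each of two decoder layers.
if __name__ == "__main__":
    past = tuple((torch.randn(3, 2, 4, 8), torch.randn(3, 2, 4, 8)) for _ in range(2))
    beam_idx = torch.tensor([2, 0, 0])  # beam 2 plus two copies of beam 0 survive

    reordered = TrOCRForCausalLM._reorder_cache(past, beam_idx)
    # the first reordered beam now holds what used to be beam 2
    assert torch.equal(reordered[0][0][0], past[0][0][2])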
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/trocr/processing_trocr.py
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for TrOCR.
"""
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class TrOCRProcessor(ProcessorMixin):
    r"""
    Constructs a TrOCR processor which wraps a vision image processor and a TrOCR tokenizer into a single processor.

    [`TrOCRProcessor`] offers all the functionalities of [`ViTImageProcessor`/`DeiTImageProcessor`] and
    [`RobertaTokenizer`/`XLMRobertaTokenizer`]. See the [`~TrOCRProcessor.__call__`] and [`~TrOCRProcessor.decode`]
    for more information.

    Args:
        image_processor ([`ViTImageProcessor`/`DeiTImageProcessor`], *optional*):
            An instance of [`ViTImageProcessor`/`DeiTImageProcessor`]. The image processor is a required input.
        tokenizer ([`RobertaTokenizer`/`XLMRobertaTokenizer`], *optional*):
            An instance of [`RobertaTokenizer`/`XLMRobertaTokenizer`]. The tokenizer is a required input.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """
        When used in normal mode, this method forwards all its arguments to AutoImageProcessor's
        [`~AutoImageProcessor.__call__`] and returns its output. If used in the context
        [`~TrOCRProcessor.as_target_processor`] this method forwards all its arguments to TrOCRTokenizer's
        [`~TrOCRTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to TrOCRTokenizer's [`~PreTrainedTokenizer.batch_decode`].
Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to TrOCRTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @contextmanager def as_target_processor(self): """ Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning TrOCR. """ warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your images inputs, or in a separate call." ) self._in_target_context_manager = True self.current_processor = self.tokenizer yield self.current_processor = self.image_processor self._in_target_context_manager = False @property def feature_extractor_class(self): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, ) return self.image_processor_class @property def feature_extractor(self): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, ) return self.image_processor
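
# --- Hedged usage sketch (not part of the original file) ---
# Passing `images` and `text` in a single call exercises the `__call__` logic
# above: the image processor produces `pixel_values` and the tokenized text is
# attached as `labels`. Downloading the checkpoint and image needs network access.
if __name__ == "__main__":
    import requests
    from PIL import Image

    processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
    url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    batch = processor(images=image, text="industry", return_tensors="pt")
    print(batch["pixel_values"].shape)  # encoder input tensor
    print(batch["labels"])  # tokenized text, usable directly as labels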
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/trocr/__init__.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) _import_structure = { "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_trocr"] = [ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
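
# --- Simplified sketch of the lazy-import pattern used above ---
# (not part of the original file; `_SketchLazyModule` is a minimal, hypothetical
# stand-in for transformers' internal `_LazyModule`). Exported symbols are only
# resolved to their defining submodule on first attribute access.
import importlib
import types


class _SketchLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._name_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, name):
        module_name = self._name_to_module.get(name)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        value = getattr(importlib.import_module(module_name), name)
        setattr(self, name, value)  # cache so later lookups skip the import machinery
        return value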
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/trocr/convert_trocr_unilm_to_pytorch.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert TrOCR checkpoints from the unilm repository.""" import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(encoder_config, decoder_config): rename_keys = [] for i in range(encoder_config.num_hidden_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias")) rename_keys.append( (f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append( (f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append( (f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias")) rename_keys.append( (f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append( (f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append( (f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias")) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ("encoder.deit.cls_token", "encoder.embeddings.cls_token"), ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"), ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"), ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"), ("encoder.deit.norm.weight", "encoder.layernorm.weight"), ("encoder.deit.norm.bias", "encoder.layernorm.bias"), ] ) return rename_keys # we split up the matrix of each encoder layer into queries, keys and values def read_in_q_k_v(state_dict, encoder_config): for i in range(encoder_config.num_hidden_layers): # queries, keys and values (only weights, no biases) in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight") state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[ : encoder_config.hidden_size, : ] state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = 
in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[ -encoder_config.hidden_size :, : ] def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val # We will verify our results on an image of the IAM Handwriting Database def prepare_img(checkpoint_url): if "handwritten" in checkpoint_url: url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in checkpoint_url: url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg" im = Image.open(requests.get(url, stream=True).raw).convert("RGB") return im @torch.no_grad() def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path): """ Copy/paste/tweak model's weights to our VisionEncoderDecoderModel structure. """ # define encoder and decoder configs based on checkpoint_url encoder_config = ViTConfig(image_size=384, qkv_bias=False) decoder_config = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: decoder_config.encoder_hidden_size = 768 elif "large" in checkpoint_url: # use ViT-large encoder encoder_config.hidden_size = 1024 encoder_config.intermediate_size = 4096 encoder_config.num_hidden_layers = 24 encoder_config.num_attention_heads = 16 decoder_config.encoder_hidden_size = 1024 else: raise ValueError("Should either find 'base' or 'large' in checkpoint URL") # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: decoder_config.tie_word_embeddings = False decoder_config.activation_function = "relu" decoder_config.max_position_embeddings = 1024 decoder_config.scale_embedding = True decoder_config.use_learned_position_embeddings = False decoder_config.layernorm_embedding = False # load HuggingFace model encoder = ViTModel(encoder_config, add_pooling_layer=False) decoder = TrOCRForCausalLM(decoder_config) model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder) model.eval() # load state_dict of original model, rename some keys state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"] rename_keys = create_rename_keys(encoder_config, decoder_config) for src, dest in rename_keys: rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, encoder_config) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): val = state_dict.pop(key) if key.startswith("decoder") and "output_projection" not in key: state_dict["decoder.model." 
+ key] = val else: state_dict[key] = val # load state dict model.load_state_dict(state_dict) # Check outputs on an image image_processor = ViTImageProcessor(size=encoder_config.image_size) tokenizer = RobertaTokenizer.from_pretrained("FacebookAI/roberta-large") processor = TrOCRProcessor(image_processor, tokenizer) pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values # verify logits decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]]) outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids) logits = outputs.logits expected_shape = torch.Size([1, 1, 50265]) if "trocr-base-handwritten" in checkpoint_url: expected_slice = torch.tensor( [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] ) elif "trocr-large-handwritten" in checkpoint_url: expected_slice = torch.tensor( [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] ) elif "trocr-base-printed" in checkpoint_url: expected_slice = torch.tensor( [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] ) elif "trocr-large-printed" in checkpoint_url: expected_slice = torch.tensor( [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected" Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving processor to {pytorch_dump_folder_path}") processor.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) args = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
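
# --- Toy illustration (not part of the original script) ---
# `read_in_q_k_v` above slices a fused (3 * hidden, hidden) qkv projection into
# separate query/key/value weights. Small placeholder sizes keep the slicing
# arithmetic easy to verify by eye.
def _toy_qkv_split():
    hidden_size = 4
    fused = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).view(3 * hidden_size, hidden_size)

    query = fused[:hidden_size, :]
    key = fused[hidden_size : hidden_size * 2, :]
    value = fused[-hidden_size:, :]

    # concatenating the slices reconstructs the fused matrix exactly
    assert torch.equal(torch.cat([query, key, value], dim=0), fused)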
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/trocr/configuration_trocr.py
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TrOCR model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class TrOCRConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`TrOCRForCausalLM`]. It is used to instantiate a
    TrOCR model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the TrOCR
    [microsoft/trocr-base-handwritten](https://huggingface.co/microsoft/trocr-base-handwritten) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50265):
            Vocabulary size of the TrOCR model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`TrOCRForCausalLM`].
        d_model (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        decoder_layers (`int`, *optional*, defaults to 12):
            Number of decoder layers.
        decoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the pooler. If string, `"gelu"`, `"relu"`,
            `"silu"` and `"gelu_new"` are supported.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
scale_embedding (`bool`, *optional*, defaults to `False`): Whether or not to scale the word embeddings by sqrt(d_model). use_learned_position_embeddings (`bool`, *optional*, defaults to `True`): Whether or not to use learned position embeddings. If not, sinusoidal position embeddings will be used. layernorm_embedding (`bool`, *optional*, defaults to `True`): Whether or not to use a layernorm after the word + position embeddings. Example: ```python >>> from transformers import TrOCRConfig, TrOCRForCausalLM >>> # Initializing a TrOCR-base style configuration >>> configuration = TrOCRConfig() >>> # Initializing a model (with random weights) from the TrOCR-base style configuration >>> model = TrOCRForCausalLM(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "trocr" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model", "num_hidden_layers": "decoder_layers", } def __init__( self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ): self.vocab_size = vocab_size self.d_model = d_model self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.activation_function = activation_function self.max_position_embeddings = max_position_embeddings self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.init_std = init_std self.decoder_layerdrop = decoder_layerdrop self.use_cache = use_cache self.scale_embedding = scale_embedding self.use_learned_position_embeddings = use_learned_position_embeddings self.layernorm_embedding = layernorm_embedding super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
0
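A quick illustration of the `attribute_map` defined in the configuration above; a minimal sketch assuming a recent `transformers` install. The generic names (`hidden_size`, `num_hidden_layers`, `num_attention_heads`) are resolved by `PretrainedConfig` onto the decoder-specific attributes, so both spellings stay in sync:

```python
from transformers import TrOCRConfig

# attribute_map aliases the generic config names onto the decoder-specific ones
config = TrOCRConfig(d_model=256, decoder_layers=6, decoder_attention_heads=8)
assert config.hidden_size == 256          # alias for d_model
assert config.num_hidden_layers == 6      # alias for decoder_layers
assert config.num_attention_heads == 8    # alias for decoder_attention_heads
```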
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/encodec/modeling_encodec.py
# coding=utf-8 # Copyright 2023 Meta Platforms, Inc. and affiliates, and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch EnCodec model.""" import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_encodec import EncodecConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "EncodecConfig" from ..deprecated._archive_maps import ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 @dataclass class EncodecOutput(ModelOutput): """ Args: audio_codes (`torch.LongTensor` of shape `(batch_size, nb_chunks, chunk_length)`, *optional*): Discrete code embeddings computed using `model.encode`. audio_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Decoded audio values, obtained using the decoder part of Encodec. """ audio_codes: torch.LongTensor = None audio_values: torch.FloatTensor = None @dataclass class EncodecEncoderOutput(ModelOutput): """ Args: audio_codes (`torch.LongTensor` of shape `(batch_size, nb_chunks, chunk_length)`, *optional*): Discrete code embeddings computed using `model.encode`. audio_scales (`torch.Tensor` of shape `(batch_size, nb_chunks)`, *optional*): Scaling factor for each `audio_codes` input. This is used to unscale each chunk of audio when decoding. """ audio_codes: torch.LongTensor = None audio_scales: torch.FloatTensor = None @dataclass class EncodecDecoderOutput(ModelOutput): """ Args: audio_values (`torch.FloatTensor` of shape `(batch_size, segment_length)`, *optional*): Decoded audio values, obtained using the decoder part of Encodec. """ audio_values: torch.FloatTensor = None class EncodecConv1d(nn.Module): """Conv1d with asymmetric or causal padding and normalization.""" def __init__( self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, dilation: int = 1 ): super().__init__() self.causal = config.use_causal_conv self.pad_mode = config.pad_mode self.norm_type = config.norm_type if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}' ) # warn user on unusual setup between dilation and stride if stride > 1 and dilation > 1: logger.warning( "EncodecConv1d has been initialized with stride > 1 and dilation > 1" f" (kernel_size={kernel_size}, stride={stride}, dilation={dilation})."
) self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride, dilation=dilation) if self.norm_type == "weight_norm": self.conv = nn.utils.weight_norm(self.conv) elif self.norm_type == "time_group_norm": self.norm = nn.GroupNorm(1, out_channels) kernel_size = self.conv.kernel_size[0] stride = torch.tensor(self.conv.stride[0], dtype=torch.int64) dilation = self.conv.dilation[0] # Effective kernel size with dilations. kernel_size = torch.tensor((kernel_size - 1) * dilation + 1, dtype=torch.int64) self.register_buffer("stride", stride, persistent=False) self.register_buffer("kernel_size", kernel_size, persistent=False) self.register_buffer("padding_total", torch.tensor(kernel_size - stride, dtype=torch.int64), persistent=False) def _get_extra_padding_for_conv1d( self, hidden_states: torch.Tensor, ) -> torch.Tensor: """See `pad_for_conv1d`.""" length = hidden_states.shape[-1] n_frames = (length - self.kernel_size + self.padding_total) / self.stride + 1 n_frames = torch.ceil(n_frames).to(torch.int64) - 1 ideal_length = n_frames * self.stride + self.kernel_size - self.padding_total return ideal_length - length @staticmethod def _pad1d(hidden_states: torch.Tensor, paddings: Tuple[int, int], mode: str = "zero", value: float = 0.0): """Tiny wrapper around torch.nn.functional.pad, just to allow for reflect padding on small input. If this is the case, we insert extra 0 padding to the right before the reflection happens. """ length = hidden_states.shape[-1] padding_left, padding_right = paddings if not mode == "reflect": return nn.functional.pad(hidden_states, paddings, mode, value) max_pad = max(padding_left, padding_right) extra_pad = 0 if length <= max_pad: extra_pad = max_pad - length + 1 hidden_states = nn.functional.pad(hidden_states, (0, extra_pad)) padded = nn.functional.pad(hidden_states, paddings, mode, value) end = padded.shape[-1] - extra_pad return padded[..., :end] def forward(self, hidden_states): extra_padding = self._get_extra_padding_for_conv1d(hidden_states) if self.causal: # Left padding for causal hidden_states = self._pad1d(hidden_states, (self.padding_total, extra_padding), mode=self.pad_mode) else: # Asymmetric padding required for odd strides padding_right = self.padding_total // 2 padding_left = self.padding_total - padding_right hidden_states = self._pad1d( hidden_states, (padding_left, padding_right + extra_padding), mode=self.pad_mode ) hidden_states = self.conv(hidden_states) if self.norm_type == "time_group_norm": hidden_states = self.norm(hidden_states) return hidden_states class EncodecConvTranspose1d(nn.Module): """ConvTranspose1d with asymmetric or causal padding and normalization.""" def __init__(self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1): super().__init__() self.causal = config.use_causal_conv self.trim_right_ratio = config.trim_right_ratio self.norm_type = config.norm_type if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' ) self.conv = nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride) if config.norm_type == "weight_norm": self.conv = nn.utils.weight_norm(self.conv) elif config.norm_type == "time_group_norm": self.norm = nn.GroupNorm(1, out_channels) if not (self.causal or self.trim_right_ratio == 1.0): raise ValueError("`trim_right_ratio` != 1.0 only makes sense for causal convolutions") def forward(self, hidden_states): kernel_size = self.conv.kernel_size[0] stride = 
self.conv.stride[0] padding_total = kernel_size - stride hidden_states = self.conv(hidden_states) if self.norm_type == "time_group_norm": hidden_states = self.norm(hidden_states) # We will only trim fixed padding. Extra padding from `pad_for_conv1d` would be # removed at the very end, when keeping only the right length for the output, # as removing it here would require also passing the length at the matching layer # in the encoder. if self.causal: # Trim the padding on the right according to the specified ratio # if trim_right_ratio = 1.0, trim everything from right padding_right = math.ceil(padding_total * self.trim_right_ratio) else: # Asymmetric padding required for odd strides padding_right = padding_total // 2 padding_left = padding_total - padding_right # unpad end = hidden_states.shape[-1] - padding_right hidden_states = hidden_states[..., padding_left:end] return hidden_states class EncodecLSTM(nn.Module): """ LSTM without worrying about the hidden state, nor the layout of the data. Expects input as convolutional layout. """ def __init__(self, config, dimension): super().__init__() self.lstm = nn.LSTM(dimension, dimension, config.num_lstm_layers) def forward(self, hidden_states): hidden_states = hidden_states.permute(2, 0, 1) hidden_states = self.lstm(hidden_states)[0] + hidden_states hidden_states = hidden_states.permute(1, 2, 0) return hidden_states class EncodecResnetBlock(nn.Module): """ Residual block from SEANet model as used by EnCodec. """ def __init__(self, config: EncodecConfig, dim: int, dilations: List[int]): super().__init__() kernel_sizes = (config.residual_kernel_size, 1) if len(kernel_sizes) != len(dilations): raise ValueError("Number of kernel sizes should match number of dilations") hidden = dim // config.compress block = [] for i, (kernel_size, dilation) in enumerate(zip(kernel_sizes, dilations)): in_chs = dim if i == 0 else hidden out_chs = dim if i == len(kernel_sizes) - 1 else hidden block += [nn.ELU()] block += [EncodecConv1d(config, in_chs, out_chs, kernel_size, dilation=dilation)] self.block = nn.ModuleList(block) if config.use_conv_shortcut: self.shortcut = EncodecConv1d(config, dim, dim, kernel_size=1) else: self.shortcut = nn.Identity() def forward(self, hidden_states): residual = hidden_states for layer in self.block: hidden_states = layer(hidden_states) return self.shortcut(residual) + hidden_states class EncodecEncoder(nn.Module): """SEANet encoder as used by EnCodec.""" def __init__(self, config: EncodecConfig): super().__init__() model = [EncodecConv1d(config, config.audio_channels, config.num_filters, config.kernel_size)] scaling = 1 # Downsample to raw audio scale for ratio in reversed(config.upsampling_ratios): current_scale = scaling * config.num_filters # Add residual layers for j in range(config.num_residual_layers): model += [EncodecResnetBlock(config, current_scale, [config.dilation_growth_rate**j, 1])] # Add downsampling layers model += [nn.ELU()] model += [EncodecConv1d(config, current_scale, current_scale * 2, kernel_size=ratio * 2, stride=ratio)] scaling *= 2 model += [EncodecLSTM(config, scaling * config.num_filters)] model += [nn.ELU()] model += [EncodecConv1d(config, scaling * config.num_filters, config.hidden_size, config.last_kernel_size)] self.layers = nn.ModuleList(model) def forward(self, hidden_states): for layer in self.layers: hidden_states = layer(hidden_states) return hidden_states class EncodecDecoder(nn.Module): """SEANet decoder as used by EnCodec.""" def __init__(self, config: EncodecConfig): super().__init__() scaling 
= int(2 ** len(config.upsampling_ratios)) model = [EncodecConv1d(config, config.hidden_size, scaling * config.num_filters, config.kernel_size)] model += [EncodecLSTM(config, scaling * config.num_filters)] # Upsample to raw audio scale for ratio in config.upsampling_ratios: current_scale = scaling * config.num_filters # Add upsampling layers model += [nn.ELU()] model += [ EncodecConvTranspose1d(config, current_scale, current_scale // 2, kernel_size=ratio * 2, stride=ratio) ] # Add residual layers for j in range(config.num_residual_layers): model += [EncodecResnetBlock(config, current_scale // 2, (config.dilation_growth_rate**j, 1))] scaling //= 2 # Add final layers model += [nn.ELU()] model += [EncodecConv1d(config, config.num_filters, config.audio_channels, config.last_kernel_size)] self.layers = nn.ModuleList(model) def forward(self, hidden_states): for layer in self.layers: hidden_states = layer(hidden_states) return hidden_states class EncodecEuclideanCodebook(nn.Module): """Codebook with Euclidean distance.""" def __init__(self, config: EncodecConfig): super().__init__() embed = torch.zeros(config.codebook_size, config.codebook_dim) self.codebook_size = config.codebook_size self.register_buffer("inited", torch.Tensor([True])) self.register_buffer("cluster_size", torch.zeros(config.codebook_size)) self.register_buffer("embed", embed) self.register_buffer("embed_avg", embed.clone()) def quantize(self, hidden_states): embed = self.embed.t() scaled_states = hidden_states.pow(2).sum(1, keepdim=True) dist = -(scaled_states - 2 * hidden_states @ embed + embed.pow(2).sum(0, keepdim=True)) embed_ind = dist.max(dim=-1).indices return embed_ind def encode(self, hidden_states): shape = hidden_states.shape # pre-process hidden_states = hidden_states.reshape((-1, shape[-1])) # quantize embed_ind = self.quantize(hidden_states) # post-process embed_ind = embed_ind.view(*shape[:-1]) return embed_ind def decode(self, embed_ind): quantize = nn.functional.embedding(embed_ind, self.embed) return quantize class EncodecVectorQuantization(nn.Module): """ Vector quantization implementation. Currently supports only euclidean distance. """ def __init__(self, config: EncodecConfig): super().__init__() self.codebook = EncodecEuclideanCodebook(config) def encode(self, hidden_states): hidden_states = hidden_states.permute(0, 2, 1) embed_in = self.codebook.encode(hidden_states) return embed_in def decode(self, embed_ind): quantize = self.codebook.decode(embed_ind) quantize = quantize.permute(0, 2, 1) return quantize class EncodecResidualVectorQuantizer(nn.Module): """Residual Vector Quantizer.""" def __init__(self, config: EncodecConfig): super().__init__() self.codebook_size = config.codebook_size self.frame_rate = config.frame_rate self.num_quantizers = config.num_quantizers self.layers = nn.ModuleList([EncodecVectorQuantization(config) for _ in range(config.num_quantizers)]) def get_num_quantizers_for_bandwidth(self, bandwidth: Optional[float] = None) -> int: """Return num_quantizers based on specified target bandwidth.""" bw_per_q = math.log2(self.codebook_size) * self.frame_rate num_quantizers = self.num_quantizers if bandwidth is not None and bandwidth > 0.0: num_quantizers = int(max(1, math.floor(bandwidth * 1000 / bw_per_q))) return num_quantizers def encode(self, embeddings: torch.Tensor, bandwidth: Optional[float] = None) -> torch.Tensor: """ Encode a given input tensor with the specified frame rate at the given bandwidth. 
The RVQ encode method sets the appropriate number of quantizers to use and returns indices for each quantizer. """ num_quantizers = self.get_num_quantizers_for_bandwidth(bandwidth) residual = embeddings all_indices = [] for layer in self.layers[:num_quantizers]: indices = layer.encode(residual) quantized = layer.decode(indices) residual = residual - quantized all_indices.append(indices) out_indices = torch.stack(all_indices) return out_indices def decode(self, codes: torch.Tensor) -> torch.Tensor: """Decode the given codes to the quantized representation.""" quantized_out = torch.tensor(0.0, device=codes.device) for i, indices in enumerate(codes): layer = self.layers[i] quantized = layer.decode(indices) quantized_out = quantized_out + quantized return quantized_out class EncodecPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = EncodecConfig base_model_prefix = "encodec" main_input_name = "input_values" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv1d): nn.init.kaiming_normal_(module.weight) if module.bias is not None: k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) nn.init.uniform_(module.bias, a=-k, b=k) elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LSTM): for name, param in module.named_parameters(): if "weight" in name: nn.init.xavier_uniform_(param) elif "bias" in name: nn.init.constant_(param, 0.0) ENCODEC_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior. Parameters: config ([`EncodecConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ ENCODEC_INPUTS_DOCSTRING = r""" Args: input_values (`torch.FloatTensor` of shape `(batch_size, channels, sequence_length)`, *optional*): Raw audio input converted to Float and padded to the appropriate length in order to be encoded using chunks of length `self.chunk_length` and a stride of `config.chunk_stride`. padding_mask (`torch.BoolTensor` of shape `(batch_size, channels, sequence_length)`, *optional*): Mask to avoid computing scaling factors on padding token indices (can we avoid computing conv on these?). Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. <Tip warning={true}> `padding_mask` should always be passed, unless the input was truncated or not padded.
This is because in order to process tensors effectively, the input audio should be padded so that `input_length % stride = step` with `step = chunk_length-stride`. This ensures that all chunks are of the same shape </Tip> bandwidth (`float`, *optional*): The target bandwidth. Must be one of `config.target_bandwidths`. If `None`, uses the smallest possible bandwidth. bandwidth is represented as a thousandth of what it is, e.g. 6kbps bandwidth is represented as `bandwidth == 6.0` audio_codes (`torch.LongTensor` of shape `(batch_size, nb_chunks, chunk_length)`, *optional*): Discret code embeddings computed using `model.encode`. audio_scales (`torch.Tensor` of shape `(batch_size, nb_chunks)`, *optional*): Scaling factor for each `audio_codes` input. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The EnCodec neural audio codec model.", ENCODEC_START_DOCSTRING, ) class EncodecModel(EncodecPreTrainedModel): def __init__(self, config: EncodecConfig): super().__init__(config) self.config = config self.encoder = EncodecEncoder(config) self.decoder = EncodecDecoder(config) self.quantizer = EncodecResidualVectorQuantizer(config) self.bits_per_codebook = int(math.log2(self.config.codebook_size)) if 2**self.bits_per_codebook != self.config.codebook_size: raise ValueError("The codebook_size must be a power of 2.") # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _encode_frame( self, input_values: torch.Tensor, bandwidth: float, padding_mask: int ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: """ Encodes the given input using the underlying VQVAE. If `config.normalize` is set to `True` the input is first normalized. The padding mask is required to compute the correct scale. """ length = input_values.shape[-1] duration = length / self.config.sampling_rate if self.config.chunk_length_s is not None and duration > 1e-5 + self.config.chunk_length_s: raise RuntimeError(f"Duration of frame ({duration}) is longer than chunk {self.config.chunk_length_s}") scale = None if self.config.normalize: # if the padding is non zero input_values = input_values * padding_mask mono = torch.sum(input_values, 1, keepdim=True) / input_values.shape[1] scale = mono.pow(2).mean(dim=-1, keepdim=True).sqrt() + 1e-8 input_values = input_values / scale embeddings = self.encoder(input_values) codes = self.quantizer.encode(embeddings, bandwidth) codes = codes.transpose(0, 1) return codes, scale def encode( self, input_values: torch.Tensor, padding_mask: torch.Tensor = None, bandwidth: Optional[float] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor, Optional[torch.Tensor]], EncodecEncoderOutput]: """ Encodes the input audio waveform into discrete codes. Args: input_values (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`): Float values of the input audio waveform. padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`): Padding mask used to pad the `input_values`. bandwidth (`float`, *optional*): The target bandwidth. Must be one of `config.target_bandwidths`. If `None`, uses the smallest possible bandwidth. bandwidth is represented as a thousandth of what it is, e.g. 
6kbps bandwidth is represented as `bandwidth == 6.0` Returns: A list of frames containing the discrete encoded codes for the input audio waveform, along with rescaling factors for each chunk when `normalize` is True. Each frame is a tuple `(codebook, scale)`, with `codebook` of shape `[batch_size, num_codebooks, frames]`. """ return_dict = return_dict if return_dict is not None else self.config.return_dict if bandwidth is None: bandwidth = self.config.target_bandwidths[0] if bandwidth not in self.config.target_bandwidths: raise ValueError( f"This model doesn't support the bandwidth {bandwidth}. " f"Select one of {self.config.target_bandwidths}." ) _, channels, input_length = input_values.shape if channels < 1 or channels > 2: raise ValueError(f"Number of audio channels must be 1 or 2, but got {channels}") chunk_length = self.config.chunk_length if chunk_length is None: chunk_length = input_length stride = input_length else: stride = self.config.chunk_stride if padding_mask is None: padding_mask = torch.ones_like(input_values).bool() encoded_frames = [] scales = [] step = chunk_length - stride if (input_length % stride) - step != 0: raise ValueError( "The input length is not properly padded for batched chunked encoding. Make sure to pad the input correctly." ) for offset in range(0, input_length - step, stride): mask = padding_mask[..., offset : offset + chunk_length].bool() frame = input_values[:, :, offset : offset + chunk_length] encoded_frame, scale = self._encode_frame(frame, bandwidth, mask) encoded_frames.append(encoded_frame) scales.append(scale) encoded_frames = torch.stack(encoded_frames) if not return_dict: return (encoded_frames, scales) return EncodecEncoderOutput(encoded_frames, scales) @staticmethod def _linear_overlap_add(frames: List[torch.Tensor], stride: int): # Generic overlap add, with linear fade-in/fade-out, supporting complex scenarios # e.g., more than 2 frames per position. # The core idea is to use a weight function that is a triangle, # with a maximum value at the middle of the chunk. # We use this weighting when summing the frames, and divide by the sum of weights # for each position at the end. Thus: # - if a frame is the only one to cover a position, the weighting is a no-op. # - if 2 frames cover a position: # ... ... # / \/ \ # / /\ \ # S T , i.e. S is the offset where the second frame starts, T where the first frame ends. # Then the weight function for each one is: (t - S), (T - t), with `t` a given offset. # After the final normalization, the weight of the second frame at position `t` is # (t - S) / (t - S + (T - t)) = (t - S) / (T - S), which is exactly what we want. # # - if more than 2 frames overlap at a given point, we hope that by induction # something sensible happens.
if len(frames) == 0: raise ValueError("`frames` cannot be an empty list.") device = frames[0].device dtype = frames[0].dtype shape = frames[0].shape[:-1] total_size = stride * (len(frames) - 1) + frames[-1].shape[-1] frame_length = frames[0].shape[-1] time_vec = torch.linspace(0, 1, frame_length + 2, device=device, dtype=dtype)[1:-1] weight = 0.5 - (time_vec - 0.5).abs() sum_weight = torch.zeros(total_size, device=device, dtype=dtype) out = torch.zeros(*shape, total_size, device=device, dtype=dtype) offset: int = 0 for frame in frames: frame_length = frame.shape[-1] out[..., offset : offset + frame_length] += weight[:frame_length] * frame sum_weight[offset : offset + frame_length] += weight[:frame_length] offset += stride if sum_weight.min() == 0: raise ValueError(f"`sum_weight` minimum element must be bigger than zero: {sum_weight}`") return out / sum_weight def _decode_frame(self, codes: torch.Tensor, scale: Optional[torch.Tensor] = None) -> torch.Tensor: codes = codes.transpose(0, 1) embeddings = self.quantizer.decode(codes) outputs = self.decoder(embeddings) if scale is not None: outputs = outputs * scale.view(-1, 1, 1) return outputs def decode( self, audio_codes: torch.Tensor, audio_scales: torch.Tensor, padding_mask: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor, torch.Tensor], EncodecDecoderOutput]: """ Decodes the given frames into an output audio waveform. Note that the output might be a bit bigger than the input. In that case, any extra steps at the end can be trimmed. Args: audio_codes (`torch.LongTensor` of shape `(batch_size, nb_chunks, chunk_length)`, *optional*): Discret code embeddings computed using `model.encode`. audio_scales (`torch.Tensor` of shape `(batch_size, nb_chunks)`, *optional*): Scaling factor for each `audio_codes` input. padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`): Padding mask used to pad the `input_values`. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" return_dict = return_dict or self.config.return_dict chunk_length = self.config.chunk_length if chunk_length is None: if len(audio_codes) != 1: raise ValueError(f"Expected one frame, got {len(audio_codes)}") audio_values = self._decode_frame(audio_codes[0], audio_scales[0]) else: decoded_frames = [] for frame, scale in zip(audio_codes, audio_scales): frames = self._decode_frame(frame, scale) decoded_frames.append(frames) audio_values = self._linear_overlap_add(decoded_frames, self.config.chunk_stride or 1) # truncate based on padding mask if padding_mask is not None and padding_mask.shape[-1] < audio_values.shape[-1]: audio_values = audio_values[..., : padding_mask.shape[-1]] if not return_dict: return (audio_values,) return EncodecDecoderOutput(audio_values) @add_start_docstrings_to_model_forward(ENCODEC_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=EncodecOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_values: torch.Tensor, padding_mask: Optional[torch.Tensor] = None, bandwidth: Optional[float] = None, audio_codes: Optional[torch.Tensor] = None, audio_scales: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor, torch.Tensor], EncodecOutput]: r""" Returns: Examples: ```python >>> from datasets import load_dataset >>> from transformers import AutoProcessor, EncodecModel >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example") >>> audio_sample = dataset["train"]["audio"][0]["array"] >>> model_id = "facebook/encodec_24khz" >>> model = EncodecModel.from_pretrained(model_id) >>> processor = AutoProcessor.from_pretrained(model_id) >>> inputs = processor(raw_audio=audio_sample, return_tensors="pt") >>> outputs = model(**inputs) >>> audio_codes = outputs.audio_codes >>> audio_values = outputs.audio_values ```""" return_dict = return_dict or self.config.return_dict if padding_mask is None: padding_mask = torch.ones_like(input_values).bool() if audio_codes is not None and audio_scales is None: raise ValueError("You specified `audio_codes` but did not specify the `audio_scales`") if audio_scales is not None and audio_codes is None: raise ValueError("You specified `audio_scales` but did not specify the `audio_codes`") if audio_scales is None and audio_codes is None: audio_codes, audio_scales = self.encode(input_values, padding_mask, bandwidth, False) audio_values = self.decode(audio_codes, audio_scales, padding_mask, return_dict=return_dict)[0] if not return_dict: return (audio_codes, audio_values) return EncodecOutput(audio_codes=audio_codes, audio_values=audio_values)
0
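The heart of `EncodecResidualVectorQuantizer.encode` above is the residual loop: each codebook quantizes what the previous stages failed to capture, and decoding simply sums the per-stage codebook entries. A standalone NumPy sketch of that idea, using toy codebooks rather than the trained ones:

```python
import numpy as np

def rvq_encode(x, codebooks):
    # Each stage snaps the current residual to its nearest codebook entry,
    # then subtracts the quantized value so the next stage sees the error.
    residual, indices = x, []
    for embed in codebooks:  # embed: (codebook_size, dim)
        dists = ((residual[:, None, :] - embed[None, :, :]) ** 2).sum(-1)
        idx = dists.argmin(axis=-1)
        indices.append(idx)
        residual = residual - embed[idx]
    return indices

def rvq_decode(indices, codebooks):
    # Mirrors EncodecResidualVectorQuantizer.decode: sum of per-stage entries.
    return sum(embed[idx] for idx, embed in zip(indices, codebooks))

rng = np.random.default_rng(0)
codebooks = [rng.normal(size=(16, 4)) for _ in range(4)]
x = rng.normal(size=(3, 4))
x_hat = rvq_decode(rvq_encode(x, codebooks), codebooks)
# reconstruction error shrinks as more quantizer stages are used
```

Using fewer stages (the `self.layers[:num_quantizers]` slice above) is exactly how the model trades reconstruction quality for bandwidth.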
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/encodec/convert_encodec_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert EnCodec checkpoints.""" import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() logger = logging.get_logger("transformers.models.encodec") MAPPING_QUANTIZER = { "quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited", "quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size", "quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed", "quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg", } MAPPING_ENCODER = { "encoder.model.0.conv.conv": "encoder.layers.0.conv", "encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv", "encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv", "encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv", "encoder.model.3.conv.conv": "encoder.layers.3.conv", "encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv", "encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv", "encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv", "encoder.model.6.conv.conv": "encoder.layers.6.conv", "encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv", "encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv", "encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv", "encoder.model.9.conv.conv": "encoder.layers.9.conv", "encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv", "encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv", "encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv", "encoder.model.12.conv.conv": "encoder.layers.12.conv", "encoder.model.13.lstm": "encoder.layers.13.lstm", "encoder.model.15.conv.conv": "encoder.layers.15.conv", } MAPPING_ENCODER_48K = { "encoder.model.0.conv.norm": "encoder.layers.0.norm", "encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm", "encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm", "encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm", "encoder.model.3.conv.norm": "encoder.layers.3.norm", "encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm", "encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm", "encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm", "encoder.model.6.conv.norm": "encoder.layers.6.norm", "encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm", "encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm", "encoder.model.7.shortcut.conv.norm": 
"encoder.layers.7.shortcut.norm", "encoder.model.9.conv.norm": "encoder.layers.9.norm", "encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm", "encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm", "encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm", "encoder.model.12.conv.norm": "encoder.layers.12.norm", "encoder.model.15.conv.norm": "encoder.layers.15.norm", } MAPPING_DECODER = { "decoder.model.0.conv.conv": "decoder.layers.0.conv", "decoder.model.1.lstm": "decoder.layers.1.lstm", "decoder.model.3.convtr.convtr": "decoder.layers.3.conv", "decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv", "decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv", "decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv", "decoder.model.6.convtr.convtr": "decoder.layers.6.conv", "decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv", "decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv", "decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv", "decoder.model.9.convtr.convtr": "decoder.layers.9.conv", "decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv", "decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv", "decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv", "decoder.model.12.convtr.convtr": "decoder.layers.12.conv", "decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv", "decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv", "decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv", "decoder.model.15.conv.conv": "decoder.layers.15.conv", } MAPPING_DECODER_48K = { "decoder.model.0.conv.norm": "decoder.layers.0.norm", "decoder.model.3.convtr.norm": "decoder.layers.3.norm", "decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm", "decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm", "decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm", "decoder.model.6.convtr.norm": "decoder.layers.6.norm", "decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm", "decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm", "decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm", "decoder.model.9.convtr.norm": "decoder.layers.9.norm", "decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm", "decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm", "decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm", "decoder.model.12.convtr.norm": "decoder.layers.12.norm", "decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm", "decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm", "decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm", "decoder.model.15.conv.norm": "decoder.layers.15.norm", } MAPPING_24K = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } MAPPING_48K = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } TOP_LEVEL_KEYS = [] IGNORE_KEYS = [] def set_recursively(hf_pointer, key, value, full_name, weight_type): for attribute in key.split("."): hf_pointer = getattr(hf_pointer, attribute) if weight_type is not None: hf_shape = getattr(hf_pointer, weight_type).shape else: hf_shape = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"Shape of hf {key + '.' 
+ weight_type if weight_type is not None else key} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": hf_pointer.weight.data = value elif weight_type == "weight_g": hf_pointer.weight_g.data = value elif weight_type == "weight_v": hf_pointer.weight_v.data = value elif weight_type == "bias": hf_pointer.bias.data = value elif weight_type == "running_mean": hf_pointer.running_mean.data = value elif weight_type == "running_var": hf_pointer.running_var.data = value elif weight_type == "num_batches_tracked": hf_pointer.num_batches_tracked.data = value elif weight_type == "weight_ih_l0": hf_pointer.weight_ih_l0.data = value elif weight_type == "weight_hh_l0": hf_pointer.weight_hh_l0.data = value elif weight_type == "bias_ih_l0": hf_pointer.bias_ih_l0.data = value elif weight_type == "bias_hh_l0": hf_pointer.bias_hh_l0.data = value elif weight_type == "weight_ih_l1": hf_pointer.weight_ih_l1.data = value elif weight_type == "weight_hh_l1": hf_pointer.weight_hh_l1.data = value elif weight_type == "bias_ih_l1": hf_pointer.bias_ih_l1.data = value elif weight_type == "bias_hh_l1": hf_pointer.bias_hh_l1.data = value else: hf_pointer.data = value logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.") def should_ignore(name, ignore_keys): for key in ignore_keys: if key.endswith(".*"): if name.startswith(key[:-1]): return True elif ".*." in key: prefix, suffix = key.split(".*.") if prefix in name and suffix in name: return True elif key in name: return True return False def recursively_load_weights(orig_dict, hf_model, model_name): unused_weights = [] if model_name in ("encodec_24khz", "encodec_32khz"): MAPPING = MAPPING_24K elif model_name == "encodec_48khz": MAPPING = MAPPING_48K else: raise ValueError(f"Unsupported model: {model_name}") for name, value in orig_dict.items(): if should_ignore(name, IGNORE_KEYS): logger.info(f"{name} was ignored") continue is_used = False for key, mapped_key in MAPPING.items(): if "*" in key: prefix, suffix = key.split(".*.") if prefix in name and suffix in name: key = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith("embed") and name.endswith("embed_avg"): continue is_used = True if "*" in mapped_key: layer_index = name.split(key)[0].split(".")[-2] mapped_key = mapped_key.replace("*", layer_index) if "weight_g" in name: weight_type = "weight_g" elif "weight_v" in name: weight_type = "weight_v" elif "weight_ih_l0" in name: weight_type = "weight_ih_l0" elif "weight_hh_l0" in name: weight_type = "weight_hh_l0" elif "bias_ih_l0" in name: weight_type = "bias_ih_l0" elif "bias_hh_l0" in name: weight_type = "bias_hh_l0" elif "weight_ih_l1" in name: weight_type = "weight_ih_l1" elif "weight_hh_l1" in name: weight_type = "weight_hh_l1" elif "bias_ih_l1" in name: weight_type = "bias_ih_l1" elif "bias_hh_l1" in name: weight_type = "bias_hh_l1" elif "bias" in name: weight_type = "bias" elif "weight" in name: weight_type = "weight" elif "running_mean" in name: weight_type = "running_mean" elif "running_var" in name: weight_type = "running_var" elif "num_batches_tracked" in name: weight_type = "num_batches_tracked" else: weight_type = None set_recursively(hf_model, mapped_key, value, name, weight_type) continue if not is_used: unused_weights.append(name) logger.warning(f"Unused weights: {unused_weights}") @torch.no_grad() def convert_checkpoint( model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ): """
Copy/paste/tweak model's weights to transformers design. """ if config_path is not None: config = EncodecConfig.from_pretrained(config_path) else: config = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": config.upsampling_ratios = [8, 5, 4, 4] config.target_bandwidths = [2.2] config.num_filters = 64 config.sampling_rate = 32_000 config.codebook_size = 2048 config.use_causal_conv = False config.normalize = False config.use_conv_shortcut = False elif model_name == "encodec_48khz": config.upsampling_ratios = [8, 5, 4, 2] config.target_bandwidths = [3.0, 6.0, 12.0, 24.0] config.sampling_rate = 48_000 config.audio_channels = 2 config.use_causal_conv = False config.norm_type = "time_group_norm" config.normalize = True config.chunk_length_s = 1.0 config.overlap = 0.01 else: raise ValueError(f"Unknown model name: {model_name}") model = EncodecModel(config) feature_extractor = EncodecFeatureExtractor( feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, ) feature_extractor.save_pretrained(pytorch_dump_folder_path) original_checkpoint = torch.load(checkpoint_path) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights original_checkpoint = original_checkpoint["best_state"] recursively_load_weights(original_checkpoint, model, model_name) model.save_pretrained(pytorch_dump_folder_path) if repo_id: print("Pushing to the hub...") feature_extractor.push_to_hub(repo_id) model.push_to_hub(repo_id) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--model", default="encodec_24khz", type=str, help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) args = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
0
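A small trace of how the `*` wildcards in the mappings above get resolved: `recursively_load_weights` splits the mapping key around `.*.`, reduces the key to its suffix, then recovers the layer index from the original tensor name and splices it into the HF-side key. The tensor name below is a hypothetical example chosen to match `MAPPING_QUANTIZER`:

```python
name = "quantizer.vq.layers.3._codebook.embed"  # fairseq-style key (illustrative)
key = "quantizer.vq.layers.*._codebook.embed"   # entry in MAPPING_QUANTIZER
mapped_key = "quantizer.layers.*.codebook.embed"

prefix, suffix = key.split(".*.")
assert prefix in name and suffix in name
# same extraction the converter performs once `key` has been reduced to the suffix:
layer_index = name.split(suffix)[0].split(".")[-2]
print(mapped_key.replace("*", layer_index))     # quantizer.layers.3.codebook.embed
```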
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/encodec/__init__.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _import_structure = { "configuration_encodec": [ "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP", "EncodecConfig", ], "feature_extraction_encodec": ["EncodecFeatureExtractor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_encodec"] = [ "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST", "EncodecModel", "EncodecPreTrainedModel", ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0
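The `_LazyModule` indirection above means importing the package costs almost nothing until a symbol is actually touched, while the `is_torch_available()` guard keeps the torch-backed classes optional. Roughly how that behaves in practice; a sketch assuming `transformers` is installed:

```python
# Cheap: nothing under models/encodec is actually imported yet.
import transformers

# First attribute access triggers the real import via _LazyModule.__getattr__.
config_cls = transformers.EncodecConfig           # no torch dependency
model_cls = transformers.EncodecModel             # pulls in modeling_encodec (needs torch)
# In an environment without torch, the library substitutes dummy objects for the
# guarded classes, which raise a helpful error only when they are used.
```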
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/encodec/feature_extraction_encodec.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for EnCodec.""" from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging logger = logging.get_logger(__name__) class EncodecFeatureExtractor(SequenceFeatureExtractor): r""" Constructs an EnCodec feature extractor. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Instantiating a feature extractor with the defaults will yield a similar configuration to that of the [facebook/encodec_24khz](https://huggingface.co/facebook/encodec_24khz) architecture. Args: feature_size (`int`, *optional*, defaults to 1): The feature dimension of the extracted features. Use 1 for mono, 2 for stereo. sampling_rate (`int`, *optional*, defaults to 24000): The sampling rate at which the audio waveform should be digitalized expressed in hertz (Hz). padding_value (`float`, *optional*, defaults to 0.0): The value that is used to fill the padding values. chunk_length_s (`float`, *optional*): If defined the audio is pre-processed into chunks of lengths `chunk_length_s` and then encoded. overlap (`float`, *optional*): Defines the overlap between each chunk. It is used to compute the `chunk_stride` using the following formulae : `int((1.0 - self.overlap) * self.chunk_length)`. """ model_input_names = ["input_values", "padding_mask"] def __init__( self, feature_size: int = 1, sampling_rate: int = 24000, padding_value: float = 0.0, chunk_length_s: float = None, overlap: float = None, **kwargs, ): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs) self.chunk_length_s = chunk_length_s self.overlap = overlap # This is a property because you might want to change the chunk_length_s on the fly @property def chunk_length(self) -> Optional[int]: if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate) # This is a property because you might want to change the chunk_length_s on the fly @property def chunk_stride(self) -> Optional[int]: if self.chunk_length_s is None or self.overlap is None: return None else: return max(1, int((1.0 - self.overlap) * self.chunk_length)) def __call__( self, raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Optional[Union[bool, str, PaddingStrategy]] = None, truncation: Optional[bool] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, ) -> BatchFeature: """ Main method to featurize and prepare for the model one or several sequence(s). 
Args: raw_audio (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`): The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. The numpy array must be of shape `(num_samples,)` for mono audio (`feature_size = 1`), or `(2, num_samples)` for stereo audio (`feature_size = 2`). padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, *optional*, defaults to `False`): Activates truncation to cut input sequences longer than `max_length` to `max_length`. max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. sampling_rate (`int`, *optional*): The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors. """ if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) if padding and truncation: raise ValueError("Both padding and truncation were set. 
Make sure you only set one.") elif padding is None: # by default let's pad the inputs padding = True is_batched = bool( isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list))) ) if is_batched: raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio] elif not is_batched and not isinstance(raw_audio, np.ndarray): raw_audio = np.asarray(raw_audio, dtype=np.float32) elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64): raw_audio = raw_audio.astype(np.float32) # always return batch if not is_batched: raw_audio = [np.asarray(raw_audio).T] # verify inputs are valid for idx, example in enumerate(raw_audio): if example.ndim > 2: raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}") if self.feature_size == 1 and example.ndim != 1: raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels") if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels") padded_inputs = None input_values = BatchFeature({"input_values": raw_audio}) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: max_length = min(array.shape[0] for array in raw_audio) nb_step = int(np.floor(max_length / self.chunk_stride)) max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: max_length = max(array.shape[0] for array in raw_audio) nb_step = int(np.ceil(max_length / self.chunk_stride)) max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length padding = "max_length" else: padded_inputs = input_values # normal padding on batch if padded_inputs is None: padded_inputs = self.pad( input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding, ) if padding: padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask") input_values = [] for example in padded_inputs.pop("input_values"): if self.feature_size == 1: example = example[..., None] input_values.append(example.T) padded_inputs["input_values"] = input_values if return_tensors is not None: padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs
0
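The chunking arithmetic in `__call__` above pads (or truncates) every input so that the chunk loop always sees full windows. A sketch of that computation using 48 kHz-style settings (`chunk_length_s=1.0`, `overlap=0.01`, assumed here for illustration):

```python
import math

sampling_rate = 48_000
chunk_length = int(1.0 * sampling_rate)                  # 48_000 samples
chunk_stride = max(1, int((1.0 - 0.01) * chunk_length))  # 47_520 samples

raw_length = 100_000                                     # arbitrary example input
nb_step = math.ceil(raw_length / chunk_stride)           # 3 windows needed
padded_length = (nb_step - 1) * chunk_stride + chunk_length
print(padded_length)                                     # 143_040
# every window [offset : offset + chunk_length] is now exactly chunk_length long
```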
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/encodec/configuration_encodec.py
# coding=utf-8 # Copyright 2023 Meta Platforms, Inc. and affiliates, and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ EnCodec model configuration""" import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) from ..deprecated._archive_maps import ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 class EncodecConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`EncodecModel`]. It is used to instantiate an Encodec model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [facebook/encodec_24khz](https://huggingface.co/facebook/encodec_24khz) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: target_bandwidths (`List[float]`, *optional*, defaults to `[1.5, 3.0, 6.0, 12.0, 24.0]`): The range of different bandwidths the model can encode audio with. sampling_rate (`int`, *optional*, defaults to 24000): The sampling rate at which the audio waveform should be digitized, expressed in hertz (Hz). audio_channels (`int`, *optional*, defaults to 1): Number of channels in the audio data. Either 1 for mono or 2 for stereo. normalize (`bool`, *optional*, defaults to `False`): Whether the audio shall be normalized when passed. chunk_length_s (`float`, *optional*): If defined the audio is pre-processed into chunks of lengths `chunk_length_s` and then encoded. overlap (`float`, *optional*): Defines the overlap between each chunk. It is used to compute the `chunk_stride` using the following formula: `int((1.0 - self.overlap) * self.chunk_length)`. hidden_size (`int`, *optional*, defaults to 128): Intermediate representation dimension. num_filters (`int`, *optional*, defaults to 32): Number of convolution kernels of first `EncodecConv1d` down sampling layer. num_residual_layers (`int`, *optional*, defaults to 1): Number of residual layers. upsampling_ratios (`Sequence[int]`, *optional*, defaults to `[8, 5, 4, 2]`): Kernel size and stride ratios. The encoder uses downsampling ratios instead of upsampling ratios, hence it will use the ratios in the reverse order to the ones specified here that must match the decoder order. norm_type (`str`, *optional*, defaults to `"weight_norm"`): Normalization method. Should be in `["weight_norm", "time_group_norm"]` kernel_size (`int`, *optional*, defaults to 7): Kernel size for the initial convolution. last_kernel_size (`int`, *optional*, defaults to 7): Kernel size for the last convolution layer. residual_kernel_size (`int`, *optional*, defaults to 3): Kernel size for the residual layers. dilation_growth_rate (`int`, *optional*, defaults to 2): How much to increase the dilation with each layer.
use_causal_conv (`bool`, *optional*, defaults to `True`): Whether to use fully causal convolution. pad_mode (`str`, *optional*, defaults to `"reflect"`): Padding mode for the convolutions. compress (`int`, *optional*, defaults to 2): Reduced dimensionality in residual branches (from Demucs v3). num_lstm_layers (`int`, *optional*, defaults to 2): Number of LSTM layers at the end of the encoder. trim_right_ratio (`float`, *optional*, defaults to 1.0): Ratio for trimming at the right of the transposed convolution under the `use_causal_conv = True` setup. If equal to 1.0, it means that all the trimming is done at the right. codebook_size (`int`, *optional*, defaults to 1024): Number of discret codes that make up VQVAE. codebook_dim (`int`, *optional*): Dimension of the codebook vectors. If not defined, uses `hidden_size`. use_conv_shortcut (`bool`, *optional*, defaults to `True`): Whether to use a convolutional layer as the 'skip' connection in the `EncodecResnetBlock` block. If False, an identity function will be used, giving a generic residual connection. Example: ```python >>> from transformers import EncodecModel, EncodecConfig >>> # Initializing a "facebook/encodec_24khz" style configuration >>> configuration = EncodecConfig() >>> # Initializing a model (with random weights) from the "facebook/encodec_24khz" style configuration >>> model = EncodecModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "encodec" def __init__( self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=24_000, audio_channels=1, normalize=False, chunk_length_s=None, overlap=None, hidden_size=128, num_filters=32, num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type="weight_norm", kernel_size=7, last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode="reflect", compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=1024, codebook_dim=None, use_conv_shortcut=True, **kwargs, ): self.target_bandwidths = target_bandwidths self.sampling_rate = sampling_rate self.audio_channels = audio_channels self.normalize = normalize self.chunk_length_s = chunk_length_s self.overlap = overlap self.hidden_size = hidden_size self.num_filters = num_filters self.num_residual_layers = num_residual_layers self.upsampling_ratios = upsampling_ratios self.norm_type = norm_type self.kernel_size = kernel_size self.last_kernel_size = last_kernel_size self.residual_kernel_size = residual_kernel_size self.dilation_growth_rate = dilation_growth_rate self.use_causal_conv = use_causal_conv self.pad_mode = pad_mode self.compress = compress self.num_lstm_layers = num_lstm_layers self.trim_right_ratio = trim_right_ratio self.codebook_size = codebook_size self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size self.use_conv_shortcut = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' ) super().__init__(**kwargs) # This is a property because you might want to change the chunk_length_s on the fly @property def chunk_length(self) -> Optional[int]: if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate) # This is a property because you might want to change the chunk_length_s on the fly @property def chunk_stride(self) -> Optional[int]: if self.chunk_length_s is None or self.overlap is None: return None else: return 
max(1, int((1.0 - self.overlap) * self.chunk_length)) @property def frame_rate(self) -> int: hop_length = np.prod(self.upsampling_ratios) return math.ceil(self.sampling_rate / hop_length) @property def num_quantizers(self) -> int: return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
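The derived properties above (`chunk_length`, `chunk_stride`, `frame_rate`, `num_quantizers`) are pure arithmetic over the constructor arguments. As a quick illustration of those formulas, here is a minimal sketch; it assumes only that `transformers` with `EncodecConfig` is installed, and the `chunk_length_s=1.0` / `overlap=0.5` values are arbitrary choices for the example:

```python
from transformers import EncodecConfig

# 24 kHz defaults, chunked into 1-second windows with 50% overlap (example values).
config = EncodecConfig(chunk_length_s=1.0, overlap=0.5)

# chunk_length = int(chunk_length_s * sampling_rate) = int(1.0 * 24_000)
print(config.chunk_length)  # 24000

# chunk_stride = max(1, int((1.0 - overlap) * chunk_length)) = int(0.5 * 24_000)
print(config.chunk_stride)  # 12000

# frame_rate = ceil(sampling_rate / prod(upsampling_ratios)) = ceil(24_000 / 320)
print(config.frame_rate)  # 75

# num_quantizers = int(1000 * target_bandwidths[-1] // (frame_rate * 10)) = int(24_000 // 750)
print(config.num_quantizers)  # 32
```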
mavonic_private_repos/transformers/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Classes to support Encoder-Decoder architectures""" import gc import inspect import os import tempfile import warnings from typing import Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ...configuration_utils import PretrainedConfig from ...modeling_outputs import BaseModelOutput, Seq2SeqLMOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from ..auto.configuration_auto import AutoConfig from ..auto.modeling_auto import AutoModel, AutoModelForCausalLM from .configuration_encoder_decoder import EncoderDecoderConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "EncoderDecoderConfig" DEPRECATION_WARNING = ( "Version v4.12.0 introduces a better way to train encoder-decoder models by computing the loss inside the" " encoder-decoder framework rather than in the decoder itself. You may observe training discrepancies if" " fine-tuning a model trained with versions anterior to 4.12.0. The decoder_input_ids are now created based on the" " labels, no need to pass them yourself anymore." ) ENCODER_DECODER_START_DOCSTRING = r""" This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via [`~AutoModel.from_pretrained`] function and the decoder is loaded via [`~AutoModelForCausalLM.from_pretrained`] function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like summarization. The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information). This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`EncoderDecoderConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
""" ENCODER_DECODER_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For training, `decoder_input_ids` are automatically created by the model by shifting the `labels` to the right, replacing -100 by the `pad_token_id` and prepending them with the `decoder_start_token_id`. decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. encoder_outputs (`tuple(torch.FloatTensor)`, *optional*): This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`) is a tensor of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss for the decoder. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple. kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors: - Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function. - With a *decoder_* prefix which will be input as `**decoder_kwargs` for the decoder forward function. """ def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. """ shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() if decoder_start_token_id is None: raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.") shifted_input_ids[:, 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids @add_start_docstrings(ENCODER_DECODER_START_DOCSTRING) class EncoderDecoderModel(PreTrainedModel): r""" [`EncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with one of the base model classes of the library as encoder and another one as decoder when created with the :meth*~transformers.AutoModel.from_pretrained* class method for the encoder and :meth*~transformers.AutoModelForCausalLM.from_pretrained* class method for the decoder. """ config_class = EncoderDecoderConfig base_model_prefix = "encoder_decoder" main_input_name = "input_ids" supports_gradient_checkpointing = True def __init__( self, config: Optional[PretrainedConfig] = None, encoder: Optional[PreTrainedModel] = None, decoder: Optional[PreTrainedModel] = None, ): if config is None and (encoder is None or decoder is None): raise ValueError("Either a configuration or an encoder and a decoder has to be provided.") if config is None: config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config) else: if not isinstance(config, self.config_class): raise ValueError(f"Config: {config} has to be of type {self.config_class}") if config.decoder.cross_attention_hidden_size is not None: if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size: raise ValueError( "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal" f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for" f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for" " `config.encoder.hidden_size`." 
) # initialize with config super().__init__(config) if encoder is None: from ..auto.modeling_auto import AutoModel encoder = AutoModel.from_config(config.encoder, attn_implementation=config._attn_implementation) if decoder is None: from ..auto.modeling_auto import AutoModelForCausalLM decoder = AutoModelForCausalLM.from_config(config.decoder, attn_implementation=config._attn_implementation) self.encoder = encoder self.decoder = decoder if self.encoder.config.to_dict() != self.config.encoder.to_dict(): logger.warning( f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:" f" {self.config.encoder}" ) if self.decoder.config.to_dict() != self.config.decoder.to_dict(): logger.warning( f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:" f" {self.config.decoder}" ) # make sure that the individual model's config refers to the shared config # so that the updates to the config will be synced self.encoder.config = self.config.encoder self.decoder.config = self.config.decoder # encoder outputs might need to be projected to different dimension for decoder if ( self.encoder.config.hidden_size != self.decoder.config.hidden_size and self.decoder.config.cross_attention_hidden_size is None ): self.enc_to_dec_proj = nn.Linear(self.encoder.config.hidden_size, self.decoder.config.hidden_size) if self.encoder.get_output_embeddings() is not None: raise ValueError( f"The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head" ) decoder_signature = set(inspect.signature(self.decoder.forward).parameters.keys()) if "encoder_hidden_states" not in decoder_signature: raise ValueError( "The selected decoder is not prepared for the encoder hidden states to be passed. Please see the " "following discussion on GitHub: https://github.com/huggingface/transformers/issues/23350" ) # tie encoder, decoder weights if config set accordingly self.tie_weights() def tie_weights(self): # tie encoder & decoder if needed if self.config.tie_encoder_decoder: # tie encoder and decoder base model decoder_base_model_prefix = self.decoder.base_model_prefix tied_weights = self._tie_encoder_decoder_weights( self.encoder, self.decoder._modules[decoder_base_model_prefix], self.decoder.base_model_prefix, "encoder", ) # Setting a dynamic variable instead of `_tied_weights_keys` because it's a class # attributed not an instance member, therefore modifying it will modify the entire class # Leading to issues on subsequent calls by different tests or subsequent calls. self._dynamic_tied_weights_keys = tied_weights def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def get_input_embeddings(self): return self.encoder.get_input_embeddings() def get_output_embeddings(self): return self.decoder.get_output_embeddings() def set_output_embeddings(self, new_embeddings): return self.decoder.set_output_embeddings(new_embeddings) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Example: ```python >>> from transformers import EncoderDecoderModel >>> model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16") ```""" from_tf = kwargs.pop("from_tf", False) if from_tf: from transformers import TFEncoderDecoderModel # a workaround to load from tensorflow checkpoint # Using `_tf_model` won't work, because the weight names in the encoder/decoder of `_tf_model` get # extended before saving those components. 
For example, The name of `_tf_model.encoder.vit` is # `[top model name]/encoder/vit`, but the name of `tf_model.encoder.vit` is `[top model name]/vit`. The # [top model name] is handled (stripped) by the conversion method, and the former case gets extra `encoder`, # which should not occur when we want to save the components alone. # There was a (very) ugly potential fix, which wasn't integrated to `transformers`: see # https://github.com/huggingface/transformers/pull/13222/commits/dbb3c9de76eee235791d2064094654637c99f36d#r697304245 # (the change in `src/transformers/modeling_tf_utils.py`) _tf_model = TFEncoderDecoderModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) config = _tf_model.config # Using `tf_model` instead encoder = _tf_model.encoder.__class__(_tf_model.config.encoder) decoder = _tf_model.decoder.__class__(_tf_model.config.decoder) # Make sure models are built encoder(encoder.dummy_inputs) decoder(decoder.dummy_inputs) # Get the variable correspondence between `_tf_model` and `encoder` and `decoder` encoder_variables = {} for v in encoder.trainable_variables + encoder.non_trainable_variables: encoder_variables["/".join(v.name.split("/")[1:])] = v decoder_variables = {} for v in decoder.trainable_variables + decoder.non_trainable_variables: decoder_variables["/".join(v.name.split("/")[1:])] = v _encoder_variables = {} for v in _tf_model.encoder.trainable_variables + _tf_model.encoder.non_trainable_variables: _encoder_variables["/".join(v.name.split("/")[2:])] = v _decoder_variables = {} for v in _tf_model.decoder.trainable_variables + _tf_model.decoder.non_trainable_variables: _decoder_variables["/".join(v.name.split("/")[2:])] = v # assign weight values to `encoder` and `decoder` from `_tf_model` for name, v in encoder_variables.items(): v.assign(_encoder_variables[name]) for name, v in decoder_variables.items(): v.assign(_decoder_variables[name]) tf_model = TFEncoderDecoderModel(encoder=encoder, decoder=decoder) # Deal with `enc_to_dec_proj` if hasattr(_tf_model, "enc_to_dec_proj"): tf_model(tf_model.dummy_inputs) tf_model.enc_to_dec_proj.kernel.assign(_tf_model.enc_to_dec_proj.kernel) tf_model.enc_to_dec_proj.bias.assign(_tf_model.enc_to_dec_proj.bias) with tempfile.TemporaryDirectory() as tmpdirname: encoder_dir = os.path.join(tmpdirname, "encoder") decoder_dir = os.path.join(tmpdirname, "decoder") tf_model.encoder.save_pretrained(encoder_dir) tf_model.decoder.save_pretrained(decoder_dir) if hasattr(tf_model, "enc_to_dec_proj"): enc_to_dec_proj_weight = torch.transpose( torch.from_numpy(tf_model.enc_to_dec_proj.kernel.numpy()), 1, 0 ) enc_to_dec_proj_bias = torch.from_numpy(tf_model.enc_to_dec_proj.bias.numpy()) del _tf_model del tf_model gc.collect() model = EncoderDecoderModel.from_encoder_decoder_pretrained( encoder_dir, decoder_dir, encoder_from_tf=True, decoder_from_tf=True ) # This is only for copying some specific attributes of this particular model. model.config = config if hasattr(model, "enc_to_dec_proj"): model.enc_to_dec_proj.weight.data = enc_to_dec_proj_weight.contiguous() model.enc_to_dec_proj.bias.data = enc_to_dec_proj_bias.contiguous() return model # At the moment fast initialization is not supported for composite models if kwargs.get("_fast_init", False): logger.warning( "Fast initialization is currently not supported for EncoderDecoderModel. " "Falling back to slow initialization..." 
) kwargs["_fast_init"] = False return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) @classmethod def from_encoder_decoder_pretrained( cls, encoder_pretrained_model_name_or_path: str = None, decoder_pretrained_model_name_or_path: str = None, *model_args, **kwargs, ) -> PreTrainedModel: r""" Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints. The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train the model, you need to first set it back in training mode with `model.train()`. Params: encoder_pretrained_model_name_or_path (`str`, *optional*): Information necessary to initiate the encoder. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In this case, `from_tf` should be set to `True` and a configuration object should be provided as `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`): Information necessary to initiate the decoder. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In this case, `from_tf` should be set to `True` and a configuration object should be provided as `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. model_args (remaining positional arguments, *optional*): All remaining positional arguments will be passed to the underlying model's `__init__` method. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter. - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter. - To update the parent model configuration, do not use a prefix for each configuration parameter. Behaves differently depending on whether a `config` is provided or automatically loaded. Example: ```python >>> from transformers import EncoderDecoderModel >>> # initialize a bert2bert from two pretrained BERT models. 
Note that the cross-attention layers will be randomly initialized >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-uncased", "google-bert/bert-base-uncased") >>> # saving model after fine-tuning >>> model.save_pretrained("./bert2bert") >>> # load fine-tuned model >>> model = EncoderDecoderModel.from_pretrained("./bert2bert") ```""" kwargs_encoder = { argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_") } kwargs_decoder = { argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_") } # remove encoder, decoder kwargs from kwargs for key in kwargs_encoder.keys(): del kwargs["encoder_" + key] for key in kwargs_decoder.keys(): del kwargs["decoder_" + key] # Load and initialize the encoder and decoder # The distinction between encoder and decoder at the model level is made # by the value of the flag `is_decoder` that we need to set correctly. encoder = kwargs_encoder.pop("model", None) if encoder is None: if encoder_pretrained_model_name_or_path is None: raise ValueError( "If `encoder_model` is not defined as an argument, an `encoder_pretrained_model_name_or_path` has " "to be defined." ) if "config" not in kwargs_encoder: encoder_config, kwargs_encoder = AutoConfig.from_pretrained( encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True ) if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True: logger.info( f"Initializing {encoder_pretrained_model_name_or_path} as an encoder model " "from a decoder model. Cross-attention and causal mask are disabled." ) encoder_config.is_decoder = False encoder_config.add_cross_attention = False kwargs_encoder["config"] = encoder_config encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder) decoder = kwargs_decoder.pop("model", None) if decoder is None: if decoder_pretrained_model_name_or_path is None: raise ValueError( "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has " "to be defined." ) if "config" not in kwargs_decoder: decoder_config, kwargs_decoder = AutoConfig.from_pretrained( decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True ) if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False: logger.info( f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention" f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if" f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers." ) decoder_config.is_decoder = True decoder_config.add_cross_attention = True kwargs_decoder["config"] = decoder_config if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False: logger.warning( f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. 
" f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, " "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` " "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a " "`decoder_config` to `.from_encoder_decoder_pretrained(...)`" ) decoder = AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder) # instantiate config with corresponding kwargs config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs) return cls(encoder=encoder, decoder=decoder, config=config) @add_start_docstrings_to_model_forward(ENCODER_DECODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None, past_key_values: Tuple[Tuple[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Tuple, Seq2SeqLMOutput]: r""" Returns: Examples: ```python >>> from transformers import EncoderDecoderModel, BertTokenizer >>> import torch >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained( ... "google-bert/bert-base-uncased", "google-bert/bert-base-uncased" ... 
) # initialize Bert2Bert from pre-trained checkpoints >>> # training >>> model.config.decoder_start_token_id = tokenizer.cls_token_id >>> model.config.pad_token_id = tokenizer.pad_token_id >>> model.config.vocab_size = model.config.decoder.vocab_size >>> input_ids = tokenizer("This is a really long text", return_tensors="pt").input_ids >>> labels = tokenizer("This is the corresponding summary", return_tensors="pt").input_ids >>> outputs = model(input_ids=input_ids, labels=labels) >>> loss, logits = outputs.loss, outputs.logits >>> # save and load from pretrained >>> model.save_pretrained("bert2bert") >>> model = EncoderDecoderModel.from_pretrained("bert2bert") >>> # generation >>> generated = model.generate(input_ids) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")} kwargs_decoder = { argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_") } if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs_encoder, ) elif isinstance(encoder_outputs, tuple): encoder_outputs = BaseModelOutput(*encoder_outputs) encoder_hidden_states = encoder_outputs[0] # optionally project encoder_hidden_states if ( self.encoder.config.hidden_size != self.decoder.config.hidden_size and self.decoder.config.cross_attention_hidden_size is None ): encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states) if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None): decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.new_tensor(decoder_input_ids != self.config.pad_token_id) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=attention_mask, inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache, past_key_values=past_key_values, return_dict=return_dict, **kwargs_decoder, ) # Compute loss independent from decoder (as some shift the logits inside them) loss = None if labels is not None: warnings.warn(DEPRECATION_WARNING, FutureWarning) logits = decoder_outputs.logits if return_dict else decoder_outputs[0] loss_fct = CrossEntropyLoss() loss = loss_fct(logits.reshape(-1, self.decoder.config.vocab_size), labels.view(-1)) if not return_dict: if loss is not None: return (loss,) + decoder_outputs + encoder_outputs else: return decoder_outputs + encoder_outputs return Seq2SeqLMOutput( loss=loss, logits=decoder_outputs.logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) def 
prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs ): decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past_key_values=past_key_values) decoder_attention_mask = decoder_inputs["attention_mask"] if "attention_mask" in decoder_inputs else None input_dict = { "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "decoder_input_ids": decoder_inputs["input_ids"], "encoder_outputs": encoder_outputs, "past_key_values": decoder_inputs["past_key_values"], "use_cache": use_cache, } return input_dict def resize_token_embeddings(self, *args, **kwargs): raise NotImplementedError( "Resizing the embedding layers via the EncoderDecoderModel directly is not supported. Please use the" " respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or" " model.decoder.resize_token_embeddings(...))" ) def _reorder_cache(self, past_key_values, beam_idx): # apply decoder cache reordering here return self.decoder._reorder_cache(past_key_values, beam_idx)
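One detail of the forward pass above that is easy to miss: when `labels` are given without `decoder_input_ids`, the model builds the decoder inputs itself via `shift_tokens_right` and computes the cross-entropy loss outside the decoder. The following is a minimal re-implementation of that shifting step for illustration (mirroring the module-level function rather than importing it; the token-id values are arbitrary):

```python
import torch


def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
    # Shift labels one position to the right and prepend the decoder start token.
    shifted = input_ids.new_zeros(input_ids.shape)
    shifted[:, 1:] = input_ids[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id
    # -100 marks positions ignored by the loss; they must become real pad tokens.
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted


labels = torch.tensor([[5, -100, 7, 8]])
print(shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=101))
# tensor([[101,   5,   0,   7]])
```

Note how the last label (`8`) falls off the end: at every position the decoder sees everything up to, but not including, the token it is asked to predict.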
mavonic_private_repos/transformers/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class EncoderDecoderConfig(PretrainedConfig): r""" [`EncoderDecoderConfig`] is the configuration class to store the configuration of a [`EncoderDecoderModel`]. It is used to instantiate an Encoder Decoder model according to the specified arguments, defining the encoder and decoder configs. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: kwargs (*optional*): Dictionary of keyword arguments. Notably: - **encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the encoder config. - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the decoder config. Examples: ```python >>> from transformers import BertConfig, EncoderDecoderConfig, EncoderDecoderModel >>> # Initializing a BERT google-bert/bert-base-uncased style configuration >>> config_encoder = BertConfig() >>> config_decoder = BertConfig() >>> config = EncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder) >>> # Initializing a Bert2Bert model (with random weights) from the google-bert/bert-base-uncased style configurations >>> model = EncoderDecoderModel(config=config) >>> # Accessing the model configuration >>> config_encoder = model.config.encoder >>> config_decoder = model.config.decoder >>> # set decoder config to causal lm >>> config_decoder.is_decoder = True >>> config_decoder.add_cross_attention = True >>> # Saving the model, including its configuration >>> model.save_pretrained("my-model") >>> # loading model and config from pretrained folder >>> encoder_decoder_config = EncoderDecoderConfig.from_pretrained("my-model") >>> model = EncoderDecoderModel.from_pretrained("my-model", config=encoder_decoder_config) ```""" model_type = "encoder-decoder" is_composition = True def __init__(self, **kwargs): super().__init__(**kwargs) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" encoder_config = kwargs.pop("encoder") encoder_model_type = encoder_config.pop("model_type") decoder_config = kwargs.pop("decoder") decoder_model_type = decoder_config.pop("model_type") from ..auto.configuration_auto import AutoConfig self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config) self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config) self.is_encoder_decoder = True @classmethod def from_encoder_decoder_configs( cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs ) -> PretrainedConfig: r""" Instantiate a [`EncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model configuration and decoder 
model configuration. Returns: [`EncoderDecoderConfig`]: An instance of a configuration object """ logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config") decoder_config.is_decoder = True decoder_config.add_cross_attention = True return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
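A small usage sketch of the composition above; it assumes only the public `BertConfig` and `EncoderDecoderConfig` classes, with arbitrary example sizes:

```python
from transformers import BertConfig, EncoderDecoderConfig

encoder_config = BertConfig(hidden_size=256, num_hidden_layers=4)
decoder_config = BertConfig(hidden_size=256, num_hidden_layers=4)

# `from_encoder_decoder_configs` flips the decoder flags before nesting both configs.
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True
print(config.is_encoder_decoder)  # True
```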
mavonic_private_repos/transformers/src/transformers/models/encoder_decoder/__init__.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
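For context, the guarded `_import_structure` pattern above boils down to backend availability checks at import time. A rough equivalent from user code, shown as a hedged sketch (the availability helpers are real `transformers.utils` functions):

```python
from transformers.utils import is_tf_available, is_torch_available

# Only the framework-specific class whose backend is actually installed is imported;
# the lazy module defers the heavy import until first attribute access.
if is_torch_available():
    from transformers import EncoderDecoderModel  # PyTorch implementation
if is_tf_available():
    from transformers import TFEncoderDecoderModel  # TensorFlow implementation
```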
mavonic_private_repos/transformers/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Classes to support TF Encoder-Decoder architectures""" from __future__ import annotations import inspect import re import warnings from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from ...configuration_utils import PretrainedConfig from ...modeling_tf_outputs import TFBaseModelOutput, TFSeq2SeqLMOutput from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, get_initializer, keras, unpack_inputs, ) from ...tf_utils import shape_list from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ..auto.configuration_auto import AutoConfig from ..auto.modeling_tf_auto import TFAutoModel, TFAutoModelForCausalLM from .configuration_encoder_decoder import EncoderDecoderConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "EncoderDecoderConfig" DEPRECATION_WARNING = ( "Version v4.17.0 introduces a better way to train encoder-decoder models by computing the loss inside the" " encoder-decoder framework rather than in the decoder itself. You may observe training discrepancies if" " fine-tuning a model trained with versions anterior to 4.17.0. The decoder_input_ids are now created based on the" " labels, no need to pass them yourself anymore." ) ENCODER_DECODER_START_DOCSTRING = r""" This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via [`~TFAutoModel.from_pretrained`] function and the decoder is loaded via [`~TFAutoModelForCausalLM.from_pretrained`] function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like summarization. The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information). This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. Parameters: config ([`EncoderDecoderConfig`]): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ ENCODER_DECODER_INPUTS_DOCSTRING = r""" Args: input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). Provide for sequence to sequence training to the decoder. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. decoder_attention_mask (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. encoder_outputs (`tuple(tuple(tf.Tensor)`, *optional*): This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` (`tf.Tensor` of shape `({0}, hidden_size)`) is a tensor of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(tf.Tensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `({0})`. inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. 
labels (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors: - Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function. - With a *decoder_* prefix which will be input as `**decoder_kwargs`` for the decoder forward function. """ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): if pad_token_id is None: raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.") pad_token_id = tf.cast(pad_token_id, input_ids.dtype) if decoder_start_token_id is None: raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.") decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id) shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) # replace possible -100 values in labels by `pad_token_id` shifted_input_ids = tf.where( shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids ) # "Verify that `labels` has only positive values and -100" assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) # Make sure the assertion op is called by wrapping the result in an identity no-op with tf.control_dependencies([assert_gte0]): shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids @add_start_docstrings(ENCODER_DECODER_START_DOCSTRING) class TFEncoderDecoderModel(TFPreTrainedModel, TFCausalLanguageModelingLoss): r""" [`TFEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with one of the base model classes of the library as encoder and another one as decoder when created with the [`~TFAutoModel.from_pretrained`] class method for the encoder and [`~TFAutoModelForCausalLM.from_pretrained`] class method for the decoder. 
""" config_class = EncoderDecoderConfig base_model_prefix = "encoder_decoder" load_weight_prefix = "tf_encoder_decoder_model" def __init__( self, config: Optional[PretrainedConfig] = None, encoder: Optional[TFPreTrainedModel] = None, decoder: Optional[TFPreTrainedModel] = None, ): if config is None and (encoder is None or decoder is None): raise ValueError("Either a configuration or an encoder and a decoder has to be provided.") if config is None: config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config) else: if not isinstance(config, self.config_class): raise ValueError(f"config: {config} has to be of type {self.config_class}") if config.decoder.cross_attention_hidden_size is not None: if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size: raise ValueError( "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal" f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for" f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for" " `config.encoder.hidden_size`." ) # initialize with config super().__init__(config) if encoder is None: encoder = TFAutoModel.from_config(config.encoder, name="encoder") if decoder is None: decoder = TFAutoModelForCausalLM.from_config(config.decoder, name="decoder") self.encoder = encoder self.decoder = decoder if self.encoder.config.to_dict() != self.config.encoder.to_dict(): logger.warning( f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:" f" {self.config.encoder}" ) if self.decoder.config.to_dict() != self.config.decoder.to_dict(): logger.warning( f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:" f" {self.config.decoder}" ) # make sure that the individual model's config refers to the shared config # so that the updates to the config will be synced self.encoder.config = self.config.encoder self.decoder.config = self.config.decoder # encoder outputs might need to be projected to different dimension for decoder if ( self.encoder.config.hidden_size != self.decoder.config.hidden_size and self.decoder.config.cross_attention_hidden_size is None ): self.enc_to_dec_proj = keras.layers.Dense( units=self.decoder.config.hidden_size, kernel_initializer=get_initializer(config.encoder.initializer_range), name="enc_to_dec_proj", ) if self.encoder.get_output_embeddings() is not None: raise ValueError( f"The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head" ) decoder_signature = set(inspect.signature(self.decoder.call).parameters.keys()) if "encoder_hidden_states" not in decoder_signature: raise ValueError( "The selected decoder is not prepared for the encoder hidden states to be passed. Please see the " "following discussion on GitHub: https://github.com/huggingface/transformers/issues/23350" ) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def get_input_embeddings(self): return self.encoder.get_input_embeddings() def get_output_embeddings(self): return self.decoder.get_output_embeddings() def set_output_embeddings(self, new_embeddings): return self.decoder.set_output_embeddings(new_embeddings) def tf_to_pt_weight_rename(self, tf_weight): # Matt: The TF and PT weights don't align because our TF base classes have an extra layer compared to PT models # (the main model stem is in the MainLayer class). If we remove that layer, then weight names sync up as normal. 
# However, the name of that extra layer is the name of the MainLayer in the base model. We make the assumption # here that the config model_type is the same as the name of the MainLayer. I don't know of anywhere that's # not the case, and I wasn't sure how else to go from the config to the correct MainLayer name! # This override is only needed in the case where we're crossloading weights from PT. However, since weights are # often safetensors now, we don't know if we're going to be crossloading until we sniff the weights file. # Therefore, we specify tf_to_pt_weight_rename anyway, and let the super method figure out if it needs it # or not. encoder_model_type = self.config.encoder.model_type if "encoder" in tf_weight and "decoder" not in tf_weight: return (re.sub(rf"encoder\.{encoder_model_type}\.", "encoder.", tf_weight),) else: return (tf_weight,) @classmethod def from_encoder_decoder_pretrained( cls, encoder_pretrained_model_name_or_path: str = None, decoder_pretrained_model_name_or_path: str = None, *model_args, **kwargs, ) -> TFPreTrainedModel: r""" Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints. Params: encoder_pretrained_model_name_or_path (`str`, *optional*): Information necessary to initiate the encoder. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *pytorch index checkpoint file* (e.g, `./pt_model/`). In this case, `encoder_from_pt` should be set to `True`. decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`): Information necessary to initiate the decoder. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *pytorch checkpoint file* (e.g, `./pt_model/`). In this case, `decoder_from_pt` should be set to `True`. model_args (remaining positional arguments, *optional*): All remaining positional arguments will be passed to the underlying model's `__init__` method. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter. - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter. - To update the parent model configuration, do not use a prefix for each configuration parameter. Behaves differently depending on whether a `config` is provided or automatically loaded. Example: ```python >>> from transformers import TFEncoderDecoderModel >>> # initialize a bert2gpt2 from a pretrained BERT and GPT2 model. 
Note that the cross-attention layers will be randomly initialized >>> model = TFEncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-uncased", "openai-community/gpt2") >>> # saving model after fine-tuning >>> model.save_pretrained("./bert2gpt2") >>> # load fine-tuned model >>> model = TFEncoderDecoderModel.from_pretrained("./bert2gpt2") ```""" kwargs_encoder = { argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_") } kwargs_decoder = { argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_") } # remove encoder, decoder kwargs from kwargs for key in kwargs_encoder.keys(): del kwargs["encoder_" + key] for key in kwargs_decoder.keys(): del kwargs["decoder_" + key] # Load and initialize the encoder and decoder # The distinction between encoder and decoder at the model level is made # by the value of the flag `is_decoder` that we need to set correctly. encoder = kwargs_encoder.pop("model", None) if encoder is None: if encoder_pretrained_model_name_or_path is None: raise ValueError( "If `encoder_model` is not defined as an argument, an `encoder_pretrained_model_name_or_path` has " "to be defined." ) if "config" not in kwargs_encoder: encoder_config = AutoConfig.from_pretrained(encoder_pretrained_model_name_or_path) if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True: logger.info( f"Initializing {encoder_pretrained_model_name_or_path} as an encoder model " "from a decoder model. Cross-attention and causal mask are disabled." ) encoder_config.is_decoder = False encoder_config.add_cross_attention = False kwargs_encoder["config"] = encoder_config kwargs_encoder["name"] = "encoder" kwargs_encoder["load_weight_prefix"] = cls.load_weight_prefix encoder = TFAutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder) decoder = kwargs_decoder.pop("model", None) if decoder is None: if decoder_pretrained_model_name_or_path is None: raise ValueError( "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has " "to be defined." ) if "config" not in kwargs_decoder: decoder_config = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path) if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False: logger.info( f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention" f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if" f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers." ) decoder_config.is_decoder = True decoder_config.add_cross_attention = True kwargs_decoder["config"] = decoder_config if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False: logger.warning( f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. 
" f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, " "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` " "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a " "`decoder_config` to `.from_encoder_decoder_pretrained(...)`" ) kwargs_decoder["name"] = "decoder" kwargs_decoder["load_weight_prefix"] = cls.load_weight_prefix decoder = TFAutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder) # Make sure these 2 `keras.Model` have fixed names so `from_pretrained` could load model weights correctly. if encoder.name != "encoder": raise ValueError("encoder model must be created with the name `encoder`.") if decoder.name != "decoder": raise ValueError("decoder model must be created with the name `decoder`.") # instantiate config with corresponding kwargs config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs) return cls(encoder=encoder, decoder=decoder, config=config) @unpack_inputs @add_start_docstrings_to_model_forward(ENCODER_DECODER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, decoder_input_ids: np.ndarray | tf.Tensor | None = None, decoder_attention_mask: np.ndarray | tf.Tensor | None = None, encoder_outputs: np.ndarray | tf.Tensor | None = None, past_key_values: Tuple[Tuple[tf.Tensor]] | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None, labels: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, **kwargs, ) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]: r""" Returns: Examples: ```python >>> from transformers import TFEncoderDecoderModel, BertTokenizer >>> # initialize a bert2gpt2 from a pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized >>> model = TFEncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-cased", "openai-community/gpt2") >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased") >>> # forward >>> input_ids = tokenizer.encode( ... "Hello, my dog is cute", add_special_tokens=True, return_tensors="tf" ... ) # Batch size 1 >>> outputs = model(input_ids=input_ids, decoder_input_ids=input_ids) >>> # training >>> outputs = model(input_ids=input_ids, decoder_input_ids=input_ids, labels=input_ids) >>> loss, logits = outputs.loss, outputs.logits >>> # save and load from pretrained >>> model.save_pretrained("bert2gpt2") >>> model = TFEncoderDecoderModel.from_pretrained("bert2gpt2") >>> # generation >>> generated = model.generate(input_ids, decoder_start_token_id=model.config.decoder.bos_token_id) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")} kwargs_decoder = { argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_") } # Let the user be responsible for the expected format. 
if encoder_outputs is not None: if return_dict and not isinstance(encoder_outputs, ModelOutput): raise ValueError( "If `return_dict=True` and `encoder_outputs` is provided, it should be an instance of " f"`ModelOutput`. Got an instance {type(encoder_outputs)} for `encoder_outputs`." ) if encoder_outputs is None: encoder_inputs = { "input_ids": input_ids, "attention_mask": attention_mask, "inputs_embeds": inputs_embeds, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, "training": training, } # Add arguments to encoder from `kwargs_encoder` encoder_inputs.update(kwargs_encoder) # Handle the case where the inputs are passed as a single dict which contains `labels`. # The `labels` shouldn't be passed to `self.encoder` below, because it is a based model without this # parameter (otherwise, an error occurs when `input_processing` is called inside `self.encoder.call()`). if "labels" in encoder_inputs: labels = encoder_inputs.pop("labels") # handle the init case where `dummy_inputs` returns a dict containing `decoder_input_ids`. if "decoder_input_ids" in encoder_inputs: decoder_input_ids = encoder_inputs.pop("decoder_input_ids") # handle the init case where `dummy_inputs` returns a dict containing `decoder_input_ids`. if "decoder_attention_mask" in encoder_inputs: decoder_attention_mask = encoder_inputs.pop("decoder_attention_mask") encoder_outputs = self.encoder(**encoder_inputs) encoder_hidden_states = encoder_outputs[0] # optionally project encoder_hidden_states if ( self.encoder.config.hidden_size != self.decoder.config.hidden_size and self.decoder.config.cross_attention_hidden_size is None ): encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states) if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None): decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) decoder_inputs = { "input_ids": decoder_input_ids, "attention_mask": decoder_attention_mask, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": attention_mask, "inputs_embeds": decoder_inputs_embeds, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "use_cache": use_cache, "past_key_values": past_key_values, "return_dict": return_dict, "training": training, } # Add arguments to decoder from `kwargs_decoder` decoder_inputs.update(kwargs_decoder) decoder_outputs = self.decoder(**decoder_inputs) logits = decoder_outputs[0] # Compute loss independent from decoder (as some shift the logits inside them) loss = None if labels is not None: warnings.warn(DEPRECATION_WARNING, FutureWarning) loss = self.hf_compute_loss(labels, logits) if not return_dict: past_key_values = None if use_cache: past_key_values = decoder_outputs[1] # The starting index of the remaining elements in `decoder_outputs` start_index = sum([1 if x is not None else 0 for x in (loss, logits, past_key_values)]) if not isinstance(encoder_outputs, tuple): encoder_outputs = encoder_outputs.to_tuple() output = (loss, logits, past_key_values) + decoder_outputs[start_index:] + encoder_outputs output = tuple([x for x in output if x is not None]) return output return TFSeq2SeqLMOutput( loss=loss, logits=decoder_outputs.logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, 
encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs ): decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past_key_values=past_key_values) decoder_attention_mask = decoder_inputs["attention_mask"] if "attention_mask" in decoder_inputs else None past_key_values = decoder_inputs.get("past_key_values") if past_key_values is None: past_key_values = decoder_inputs.get("past") # e.g. on TF GPT2 input_dict = { "input_ids": None, # needs to be passed to make Keras.layer.__call__ happy "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "decoder_input_ids": decoder_inputs["input_ids"], # TODO (joao): the `TFBaseModelOutput` wrapper should not be needed after the generate refactor is complete "encoder_outputs": TFBaseModelOutput(last_hidden_state=encoder_outputs[0]), "past_key_values": past_key_values, "use_cache": use_cache, } return input_dict def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor): return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) def resize_token_embeddings(self, *args, **kwargs): raise NotImplementedError( "Resizing the embedding layers via the TFEncoderDecoderModel directly is not supported. Please use the" " respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or" " model.decoder.resize_token_embeddings(...))" ) def _reorder_cache(self, past, beam_idx): # apply decoder cache reordering here return self.decoder._reorder_cache(past, beam_idx) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "enc_to_dec_proj", None) is not None: with tf.name_scope(self.enc_to_dec_proj.name): self.enc_to_dec_proj.build([None, None, self.encoder.config.hidden_size]) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "decoder", None) is not None: with tf.name_scope(self.decoder.name): self.decoder.build(None)
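# ---------------------------------------------------------------------------
# Editor's addition (not part of the original file): a minimal, hedged usage
# sketch of the class above. It assumes the Hub checkpoints named in the
# docstrings ("google-bert/bert-base-cased" and "openai-community/gpt2") are
# reachable; since the cross-attention weights are freshly initialized, the
# generated text is only meaningful after fine-tuning.
if __name__ == "__main__":
    from transformers import BertTokenizer, GPT2Tokenizer

    model = TFEncoderDecoderModel.from_encoder_decoder_pretrained(
        "google-bert/bert-base-cased", "openai-community/gpt2"
    )
    input_tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
    output_tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2")

    # GPT-2 has no pad token; reuse its eos token so generate() can pad.
    model.config.eos_token_id = model.config.decoder.eos_token_id
    model.config.pad_token_id = model.config.eos_token_id

    input_ids = input_tokenizer("Hello, my dog is cute", return_tensors="tf").input_ids
    generated = model.generate(input_ids, decoder_start_token_id=model.config.decoder.bos_token_id)
    print(output_tokenizer.batch_decode(generated, skip_special_tokens=True))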
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Classes to support Flax Encoder-Decoder architectures""" import os from typing import Optional, Tuple, Union import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from jax.random import PRNGKey from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutputWithCrossAttentions, FlaxSeq2SeqLMOutput from ...modeling_flax_utils import FlaxPreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from ..auto.configuration_auto import AutoConfig from ..auto.modeling_flax_auto import FlaxAutoModel, FlaxAutoModelForCausalLM from .configuration_encoder_decoder import EncoderDecoderConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "EncoderDecoderConfig" ENCODER_DECODER_START_DOCSTRING = r""" This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via the [`~AutoModel.from_pretrained`] function and the decoder is loaded via the [`~AutoModelForCausalLM.from_pretrained`] function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream generative task, like summarization. The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other model (see the examples for more information). This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a Flax Linen [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior. Parameters: config ([`EncoderDecoderConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). 
This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. """ ENCODER_DECODER_INPUTS_DOCSTRING = r""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) For sequence to sequence training, `decoder_input_ids` should be provided. `decoder_input_ids` should be created outside of the model by shifting the `labels` to the right, replacing -100 by the `pad_token_id` and prepending them with the `decoder_start_token_id`. decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.encoder.max_position_embeddings - 1]`. decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.decoder.max_position_embeddings - 1]`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): If set to `True`, the model will return a [`~utils.FlaxSeq2SeqLMOutput`] instead of a plain tuple. """ ENCODER_DECODER_ENCODE_INPUTS_DOCSTRING = r""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.encoder.max_position_embeddings - 1]`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): If set to `True`, the model will return a [`~utils.FlaxBaseModelOutput`] instead of a plain tuple. """ ENCODER_DECODER_DECODE_INPUTS_DOCSTRING = r""" Args: decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For sequence to sequence training, `decoder_input_ids` should be provided. `decoder_input_ids` should be created outside of the model by shifting the `labels` to the right, replacing -100 by the `pad_token_id` and prepending them with the `decoder_start_token_id`. encoder_outputs (`tuple(tuple(jnp.ndarray)`): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.decoder.max_position_embeddings - 1]`. past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`): Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): If set to `True`, the model will return a [`~utils.FlaxCausalLMOutputWithCrossAttentions`] instead of a plain tuple. """ class FlaxEncoderDecoderModule(nn.Module): config: EncoderDecoderConfig dtype: jnp.dtype = jnp.float32 def setup(self): encoder_config = self.config.encoder decoder_config = self.config.decoder # Copied from `modeling_hybrid_clip.py` with modifications. from ...models.auto.modeling_flax_auto import FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, FLAX_MODEL_MAPPING encoder_module = FLAX_MODEL_MAPPING[encoder_config.__class__].module_class decoder_module = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING[decoder_config.__class__].module_class self.encoder = encoder_module(encoder_config, dtype=self.dtype) self.decoder = decoder_module(decoder_config, dtype=self.dtype) # encoder outputs might need to be projected to different dimension for decoder if ( self.encoder.config.hidden_size != self.decoder.config.hidden_size and self.decoder.config.cross_attention_hidden_size is None ): self.enc_to_dec_proj = nn.Dense( self.decoder.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.decoder.config.initializer_range), dtype=self.dtype, ) else: self.enc_to_dec_proj = None def _get_encoder_module(self): return self.encoder def _get_projection_module(self): return self.enc_to_dec_proj def _get_decoder_module(self): return self.decoder def __call__( self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) encoder_hidden_states = encoder_outputs[0] # optionally project encoder_hidden_states if self.enc_to_dec_proj is not None: encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) if not return_dict: return decoder_outputs + encoder_outputs return FlaxSeq2SeqLMOutput( logits=decoder_outputs.logits, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings(ENCODER_DECODER_START_DOCSTRING) class FlaxEncoderDecoderModel(FlaxPreTrainedModel): r""" [`FlaxEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with the module (flax.nn.Module) of one of the base model classes of the library as encoder module and another one as decoder module when created with the :meth*~transformers.FlaxAutoModel.from_pretrained* class method for the encoder and :meth*~transformers.FlaxAutoModelForCausalLM.from_pretrained* class method for the decoder. 
""" config_class = EncoderDecoderConfig base_model_prefix = "encoder_decoder" module_class = FlaxEncoderDecoderModule def __init__( self, config: EncoderDecoderConfig, input_shape: Optional[Tuple] = None, seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): if input_shape is None: input_shape = ((1, 1), (1, 1)) if not _do_init: raise ValueError( "`FlaxEncoderDecoderModel` cannot be created without initializing, `_do_init` must be `True`." ) if config.decoder.cross_attention_hidden_size is not None: if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size: raise ValueError( "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal" f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for" f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for" " `config.encoder.hidden_size`." ) module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: encoder_input_shape, decoder_input_shape = input_shape # init input tensors input_ids = jnp.zeros(encoder_input_shape, dtype="i4") attention_mask = jnp.ones_like(input_ids) decoder_input_ids = jnp.zeros(decoder_input_shape, dtype="i4") decoder_attention_mask = jnp.ones_like(decoder_input_ids) batch_size, sequence_length = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) decoder_batch_size, decoder_sequence_length = decoder_input_ids.shape if not decoder_batch_size == batch_size: raise ValueError( f"The inputs of encoder and decoder should have the same batch size, but got {batch_size} for encoder" f" and {decoder_batch_size} for decoder." ) decoder_position_ids = jnp.broadcast_to( jnp.arange(decoder_sequence_length)[None, :], (decoder_batch_size, decoder_sequence_length) ) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init( rngs, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, )["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def init_cache(self, batch_size, max_length, encoder_outputs): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`): `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
""" # init input variables to retrieve cache decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4") decoder_attention_mask = jnp.ones_like(decoder_input_ids) decoder_position_ids = jnp.broadcast_to( jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape ) def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, **kwargs, ) init_variables = self.module.init( jax.random.PRNGKey(0), decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], init_cache=True, method=_decoder_forward, # we only need to call the decoder to init the cache ) return unfreeze(init_variables["cache"]) @add_start_docstrings(ENCODER_DECODER_ENCODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=_CONFIG_FOR_DOC) def encode( self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, position_ids: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> from transformers import FlaxEncoderDecoderModel, BertTokenizer >>> # initialize a bert2gpt2 from pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized >>> model = FlaxEncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-cased", "openai-community/gpt2") >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased") >>> text = "My friends are cool but they eat too many carbs." 
>>> input_ids = tokenizer.encode(text, return_tensors="np") >>> encoder_outputs = model.encode(input_ids) ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if position_ids is None: batch_size, sequence_length = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs): encode_module = module._get_encoder_module() return encode_module(input_ids, attention_mask, position_ids, **kwargs) outputs = self.module.apply( {"params": params or self.params}, input_ids=jnp.array(input_ids, dtype="i4"), attention_mask=jnp.array(attention_mask, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward, ) if return_dict: outputs = FlaxBaseModelOutput( last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) return outputs @add_start_docstrings(ENCODER_DECODER_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def decode( self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, decoder_position_ids: Optional[jnp.ndarray] = None, past_key_values: dict = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> from transformers import FlaxEncoderDecoderModel, BertTokenizer >>> import jax.numpy as jnp >>> # initialize a bert2gpt2 from pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized >>> model = FlaxEncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-cased", "openai-community/gpt2") >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased") >>> text = "My friends are cool but they eat too many carbs." 
>>> input_ids = tokenizer.encode(text, max_length=1024, return_tensors="np") >>> encoder_outputs = model.encode(input_ids) >>> decoder_start_token_id = model.config.decoder.bos_token_id >>> decoder_input_ids = jnp.ones((input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> logits = outputs.logits ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: batch_size, sequence_length = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) batch_size, sequence_length = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) if decoder_position_ids is None: if past_key_values is not None: raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.") decoder_position_ids = jnp.broadcast_to( jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) ) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that # it can be changed by FlaxBartAttention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False def _decoder_forward( module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, encoder_hidden_states, **kwargs ): projection_module = module._get_projection_module() decoder_module = module._get_decoder_module() # optionally project encoder_hidden_states if projection_module is not None: encoder_hidden_states = projection_module(encoder_hidden_states) return decoder_module( decoder_input_ids, decoder_attention_mask, decoder_position_ids, encoder_hidden_states=encoder_hidden_states, **kwargs, ) outputs = self.module.apply( inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward, ) # add updated cache to model output if past_key_values is not None and return_dict: outputs, past = outputs outputs["past_key_values"] = unfreeze(past["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs, past = outputs outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] return outputs @add_start_docstrings_to_model_forward(ENCODER_DECODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def __call__( self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, decoder_input_ids: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, 
position_ids: Optional[jnp.ndarray] = None, decoder_position_ids: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Examples: ```python >>> from transformers import FlaxEncoderDecoderModel, BertTokenizer, GPT2Tokenizer >>> # load a fine-tuned bert2gpt2 model >>> model = FlaxEncoderDecoderModel.from_pretrained("patrickvonplaten/bert2gpt2-cnn_dailymail-fp16") >>> # load input & output tokenizer >>> tokenizer_input = BertTokenizer.from_pretrained("google-bert/bert-base-cased") >>> tokenizer_output = GPT2Tokenizer.from_pretrained("openai-community/gpt2") >>> article = '''Sigma Alpha Epsilon is under fire for a video showing party-bound fraternity members >>> singing a racist chant. SAE's national chapter suspended the students, >>> but University of Oklahoma President David Boren took it a step further, >>> saying the university's affiliation with the fraternity is permanently done.''' >>> input_ids = tokenizer_input(article, add_special_tokens=True, return_tensors="np").input_ids >>> # use GPT2's eos_token as the pad as well as eos token >>> model.config.eos_token_id = model.config.decoder.eos_token_id >>> model.config.pad_token_id = model.config.eos_token_id >>> sequences = model.generate(input_ids, num_beams=4, max_length=12).sequences >>> summary = tokenizer_output.batch_decode(sequences, skip_special_tokens=True)[0] >>> assert summary == "SAS Alpha Epsilon suspended Sigma Alpha Epsilon members" ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # prepare encoder inputs if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if position_ids is None: batch_size, sequence_length = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) # prepare decoder inputs if decoder_input_ids is None: raise ValueError( "`decoder_input_ids` cannot be `None`. For sequence to sequence training, `decoder_position_ids` must" " be specified as an input argument." 
) if decoder_attention_mask is None: decoder_attention_mask = jnp.ones_like(decoder_input_ids) if decoder_position_ids is None: batch_size, sequence_length = decoder_input_ids.shape decoder_position_ids = jnp.broadcast_to( jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) ) # Handle any PRNG if needed rngs = {"dropout": dropout_rng} if dropout_rng is not None else {} return self.module.apply( {"params": params or self.params}, input_ids=jnp.array(input_ids, dtype="i4"), attention_mask=jnp.array(attention_mask, dtype="i4"), decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, ) def prepare_inputs_for_generation( self, decoder_input_ids, max_length, attention_mask: Optional[jax.Array] = None, decoder_attention_mask: Optional[jax.Array] = None, encoder_outputs=None, **kwargs, ): # initializing the cache batch_size, seq_length = decoder_input_ids.shape past_key_values = self.init_cache(batch_size, max_length, encoder_outputs) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since the decoder uses a causal mask, those positions are masked anyways. # Thus we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if decoder_attention_mask is not None: decoder_position_ids = decoder_attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0)) else: decoder_position_ids = jnp.broadcast_to( jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length) ) return { "past_key_values": past_key_values, "encoder_outputs": encoder_outputs, "encoder_attention_mask": attention_mask, "decoder_attention_mask": extended_attention_mask, "decoder_position_ids": decoder_position_ids, } def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1 return model_kwargs @classmethod def from_encoder_decoder_pretrained( cls, encoder_pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None, decoder_pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None, *model_args, **kwargs, ) -> FlaxPreTrainedModel: r""" Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model checkpoints. Params: encoder_pretrained_model_name_or_path (`Union[str, os.PathLike]`, *optional*): Information necessary to initiate the encoder. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~FlaxPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. decoder_pretrained_model_name_or_path (`Union[str, os.PathLike]`, *optional*, defaults to `None`): Information necessary to initiate the decoder. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. 
- A path to a *directory* containing model weights saved using [`~FlaxPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. model_args (remaining positional arguments, *optional*): All remaining positional arguments will be passed to the underlying model's `__init__` method. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g., `output_attentions=True`). - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter. - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter. - To update the parent model configuration, do not use a prefix for each configuration parameter. Behaves differently depending on whether a `config` is provided or automatically loaded. Example: ```python >>> from transformers import FlaxEncoderDecoderModel >>> # initialize a bert2gpt2 from pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized >>> model = FlaxEncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-cased", "openai-community/gpt2") >>> # saving model after fine-tuning >>> model.save_pretrained("./bert2gpt2") >>> # load fine-tuned model >>> model = FlaxEncoderDecoderModel.from_pretrained("./bert2gpt2") ```""" kwargs_encoder = { argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_") } kwargs_decoder = { argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_") } # remove encoder, decoder kwargs from kwargs for key in kwargs_encoder.keys(): del kwargs["encoder_" + key] for key in kwargs_decoder.keys(): del kwargs["decoder_" + key] # Load and initialize the encoder and decoder # The distinction between encoder and decoder at the model level is made # by the value of the flag `is_decoder` that we need to set correctly. encoder = kwargs_encoder.pop("model", None) if encoder is None: if encoder_pretrained_model_name_or_path is None: raise ValueError( "If `encoder_model` is not defined as an argument, an `encoder_pretrained_model_name_or_path` has " "to be defined." ) if "config" not in kwargs_encoder: encoder_config, kwargs_encoder = AutoConfig.from_pretrained( encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True ) if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True: logger.info( f"Initializing {encoder_pretrained_model_name_or_path} as an encoder model " "from a decoder model. Cross-attention and causal mask are disabled." ) encoder_config.is_decoder = False encoder_config.add_cross_attention = False kwargs_encoder["config"] = encoder_config encoder = FlaxAutoModel.from_pretrained( encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder ) decoder = kwargs_decoder.pop("model", None) if decoder is None: if decoder_pretrained_model_name_or_path is None: raise ValueError( "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has " "to be defined." ) if "config" not in kwargs_decoder: decoder_config, kwargs_decoder = AutoConfig.from_pretrained( decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True ) if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False: logger.info( f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. 
Cross attention" f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if" f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers." ) decoder_config.is_decoder = True decoder_config.add_cross_attention = True kwargs_decoder["config"] = decoder_config if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False: logger.warning( f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. " f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, " "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` " "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a " "`decoder_config` to `.from_encoder_decoder_pretrained(...)`" ) decoder = FlaxAutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder) # instantiate config with corresponding kwargs dtype = kwargs.pop("dtype", jnp.float32) config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs) # init model model = cls(config, dtype=dtype) model.params["encoder"] = encoder.params model.params["decoder"] = decoder.params return model
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/layoutlmv2/processing_layoutlmv2.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for LayoutLMv2. """ import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class LayoutLMv2Processor(ProcessorMixin): r""" Constructs a LayoutLMv2 processor which combines a LayoutLMv2 image processor and a LayoutLMv2 tokenizer into a single processor. [`LayoutLMv2Processor`] offers all the functionalities you need to prepare data for the model. It first uses [`LayoutLMv2ImageProcessor`] to resize document images to a fixed size, and optionally applies OCR to get words and normalized bounding boxes. These are then provided to [`LayoutLMv2Tokenizer`] or [`LayoutLMv2TokenizerFast`], which turns the words and bounding boxes into token-level `input_ids`, `attention_mask`, `token_type_ids`, `bbox`. Optionally, one can provide integer `word_labels`, which are turned into token-level `labels` for token classification tasks (such as FUNSD, CORD). Args: image_processor (`LayoutLMv2ImageProcessor`, *optional*): An instance of [`LayoutLMv2ImageProcessor`]. The image processor is a required input. tokenizer (`LayoutLMv2Tokenizer` or `LayoutLMv2TokenizerFast`, *optional*): An instance of [`LayoutLMv2Tokenizer`] or [`LayoutLMv2TokenizerFast`]. The tokenizer is a required input. 
""" attributes = ["image_processor", "tokenizer"] image_processor_class = "LayoutLMv2ImageProcessor" tokenizer_class = ("LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast") def __init__(self, image_processor=None, tokenizer=None, **kwargs): feature_extractor = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead.", FutureWarning, ) feature_extractor = kwargs.pop("feature_extractor") image_processor = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(image_processor, tokenizer) def __call__( self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding: """ This method first forwards the `images` argument to [`~LayoutLMv2ImageProcessor.__call__`]. In case [`LayoutLMv2ImageProcessor`] was initialized with `apply_ocr` set to `True`, it passes the obtained words and bounding boxes along with the additional arguments to [`~LayoutLMv2Tokenizer.__call__`] and returns the output, together with resized `images`. In case [`LayoutLMv2ImageProcessor`] was initialized with `apply_ocr` set to `False`, it passes the words (`text`/``text_pair`) and `boxes` specified by the user along with the additional arguments to [`~LayoutLMv2Tokenizer.__call__`] and returns the output, together with resized `images``. Please refer to the docstring of the above two methods for more information. """ # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." 
) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.") # first, apply the image processor features = self.image_processor(images=images, return_tensors=return_tensors) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(text, str): text = [text] # add batch dimension (as the image processor always adds a batch dimension) text_pair = features["words"] encoded_inputs = self.tokenizer( text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, ) # add pixel values images = features.pop("pixel_values") if return_overflowing_tokens is True: images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"]) encoded_inputs["image"] = images return encoded_inputs def get_overflowing_images(self, images, overflow_to_sample_mapping): # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image images_with_overflow = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx]) if len(images_with_overflow) != len(overflow_to_sample_mapping): raise ValueError( "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got" f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}" ) return images_with_overflow def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): return ["input_ids", "bbox", "token_type_ids", "attention_mask", "image"] @property def feature_extractor_class(self): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, ) return self.image_processor_class @property def feature_extractor(self): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, ) return self.image_processor
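# ---------------------------------------------------------------------------
# Editor's addition (not part of the original file): a hedged sketch of the two
# documented modes of the processor above. It assumes the
# "microsoft/layoutlmv2-base-uncased" checkpoint and a document image file
# (the "document.png" path is hypothetical); mode 1 additionally requires
# pytesseract for the built-in OCR.
if __name__ == "__main__":
    from PIL import Image

    from transformers import LayoutLMv2ImageProcessor, LayoutLMv2TokenizerFast

    image = Image.open("document.png").convert("RGB")  # hypothetical input file

    # Mode 1: built-in OCR (apply_ocr=True is the image processor default),
    # so words and boxes are extracted by Tesseract.
    processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased")
    encoding = processor(image, return_tensors="pt")

    # Mode 2: user-supplied words and boxes (normalized to the 0-1000 range), OCR off.
    image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
    tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
    processor = LayoutLMv2Processor(image_processor, tokenizer)
    words = ["hello", "world"]
    boxes = [[48, 84, 156, 98], [160, 84, 255, 98]]
    encoding = processor(image, words, boxes=boxes, return_tensors="pt")
    print(encoding.keys())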
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py
# coding=utf-8 # Copyright 2021 Microsoft Research The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch LayoutLMv2 model.""" import math from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPooling, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, is_detectron2_available, logging, replace_return_docstrings, requires_backends, ) from .configuration_layoutlmv2 import LayoutLMv2Config # soft dependency if is_detectron2_available(): import detectron2 from detectron2.modeling import META_ARCH_REGISTRY logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "microsoft/layoutlmv2-base-uncased" _CONFIG_FOR_DOC = "LayoutLMv2Config" from ..deprecated._archive_maps import LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 class LayoutLMv2Embeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super(LayoutLMv2Embeddings, self).__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size) self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size) self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size) self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) def _calc_spatial_position_embeddings(self, bbox): try: left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0]) upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1]) right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2]) lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3]) except IndexError as e: raise IndexError("The `bbox` coordinate values should be within 0-1000 range.") from e h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1]) w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0]) spatial_position_embeddings = torch.cat( [ left_position_embeddings, upper_position_embeddings, 
right_position_embeddings, lower_position_embeddings, h_position_embeddings, w_position_embeddings, ], dim=-1, ) return spatial_position_embeddings class LayoutLMv2SelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.fast_qkv = config.fast_qkv self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.has_relative_attention_bias = config.has_relative_attention_bias self.has_spatial_attention_bias = config.has_spatial_attention_bias if config.fast_qkv: self.qkv_linear = nn.Linear(config.hidden_size, 3 * self.all_head_size, bias=False) self.q_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size)) self.v_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size)) else: self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def compute_qkv(self, hidden_states): if self.fast_qkv: qkv = self.qkv_linear(hidden_states) q, k, v = torch.chunk(qkv, 3, dim=-1) if q.ndimension() == self.q_bias.ndimension(): q = q + self.q_bias v = v + self.v_bias else: _sz = (1,) * (q.ndimension() - 1) + (-1,) q = q + self.q_bias.view(*_sz) v = v + self.v_bias.view(*_sz) else: q = self.query(hidden_states) k = self.key(hidden_states) v = self.value(hidden_states) return q, k, v def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, rel_pos=None, rel_2d_pos=None, ): q, k, v = self.compute_qkv(hidden_states) # (B, L, H*D) -> (B, H, L, D) query_layer = self.transpose_for_scores(q) key_layer = self.transpose_for_scores(k) value_layer = self.transpose_for_scores(v) query_layer = query_layer / math.sqrt(self.attention_head_size) # [BSZ, NAT, L, L] attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.has_relative_attention_bias: attention_scores += rel_pos if self.has_spatial_attention_bias: attention_scores += rel_2d_pos attention_scores = attention_scores.float().masked_fill_( attention_mask.to(torch.bool), torch.finfo(attention_scores.dtype).min ) attention_probs = nn.functional.softmax(attention_scores, dim=-1, dtype=torch.float32).type_as(value_layer) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
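# Note: padded positions were filled with the dtype minimum before the softmax above, so they already receive (near-)zero attention probability at this point.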
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class LayoutLMv2Attention(nn.Module): def __init__(self, config): super().__init__() self.self = LayoutLMv2SelfAttention(config) self.output = LayoutLMv2SelfOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, rel_pos=None, rel_2d_pos=None, ): self_outputs = self.self( hidden_states, attention_mask, head_mask, output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class LayoutLMv2SelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->LayoutLMv2 class LayoutLMv2Intermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->LayoutLM class LayoutLMv2Output(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LayoutLMv2Layer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = LayoutLMv2Attention(config) self.intermediate = LayoutLMv2Intermediate(config) self.output = LayoutLMv2Output(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, rel_pos=None, rel_2d_pos=None, ): self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights layer_output = 
apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on. Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ ret = 0 if bidirectional: num_buckets //= 2 ret += (relative_position > 0).long() * num_buckets n = torch.abs(relative_position) else: n = torch.max(-relative_position, torch.zeros_like(relative_position)) # now n is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = n < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance val_if_large = max_exact + ( torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1)) ret += torch.where(is_small, n, val_if_large) return ret class LayoutLMv2Encoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([LayoutLMv2Layer(config) for _ in range(config.num_hidden_layers)]) self.has_relative_attention_bias = config.has_relative_attention_bias self.has_spatial_attention_bias = config.has_spatial_attention_bias if self.has_relative_attention_bias: self.rel_pos_bins = config.rel_pos_bins self.max_rel_pos = config.max_rel_pos self.rel_pos_bias = nn.Linear(self.rel_pos_bins, config.num_attention_heads, bias=False) if self.has_spatial_attention_bias: self.max_rel_2d_pos = config.max_rel_2d_pos self.rel_2d_pos_bins = config.rel_2d_pos_bins self.rel_pos_x_bias = nn.Linear(self.rel_2d_pos_bins, config.num_attention_heads, bias=False) self.rel_pos_y_bias = nn.Linear(self.rel_2d_pos_bins, config.num_attention_heads, bias=False) self.gradient_checkpointing = False def _calculate_1d_position_embeddings(self, position_ids): rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1) rel_pos = relative_position_bucket( rel_pos_mat, num_buckets=self.rel_pos_bins, max_distance=self.max_rel_pos, ) rel_pos = self.rel_pos_bias.weight.t()[rel_pos].permute(0, 3, 1, 2) rel_pos = rel_pos.contiguous() return rel_pos def 
_calculate_2d_position_embeddings(self, bbox): position_coord_x = bbox[:, :, 0] position_coord_y = bbox[:, :, 3] rel_pos_x_2d_mat = position_coord_x.unsqueeze(-2) - position_coord_x.unsqueeze(-1) rel_pos_y_2d_mat = position_coord_y.unsqueeze(-2) - position_coord_y.unsqueeze(-1) rel_pos_x = relative_position_bucket( rel_pos_x_2d_mat, num_buckets=self.rel_2d_pos_bins, max_distance=self.max_rel_2d_pos, ) rel_pos_y = relative_position_bucket( rel_pos_y_2d_mat, num_buckets=self.rel_2d_pos_bins, max_distance=self.max_rel_2d_pos, ) rel_pos_x = self.rel_pos_x_bias.weight.t()[rel_pos_x].permute(0, 3, 1, 2) rel_pos_y = self.rel_pos_y_bias.weight.t()[rel_pos_y].permute(0, 3, 1, 2) rel_pos_x = rel_pos_x.contiguous() rel_pos_y = rel_pos_y.contiguous() rel_2d_pos = rel_pos_x + rel_pos_y return rel_2d_pos def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, bbox=None, position_ids=None, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None rel_pos = self._calculate_1d_position_embeddings(position_ids) if self.has_relative_attention_bias else None rel_2d_pos = self._calculate_2d_position_embeddings(bbox) if self.has_spatial_attention_bias else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, layer_head_mask, output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, all_hidden_states, all_self_attentions, ] if v is not None ) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class LayoutLMv2PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = LayoutLMv2Config base_model_prefix = "layoutlmv2" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, LayoutLMv2Model): if hasattr(module, "visual_segment_embedding"): module.visual_segment_embedding.data.normal_(mean=0.0, std=self.config.initializer_range) def my_convert_sync_batchnorm(module, process_group=None): # same as `nn.modules.SyncBatchNorm.convert_sync_batchnorm` but allowing converting from `detectron2.layers.FrozenBatchNorm2d` if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): return nn.modules.SyncBatchNorm.convert_sync_batchnorm(module, process_group) module_output = module if isinstance(module, detectron2.layers.FrozenBatchNorm2d): module_output = torch.nn.SyncBatchNorm( num_features=module.num_features, eps=module.eps, affine=True, track_running_stats=True, process_group=process_group, ) module_output.weight = torch.nn.Parameter(module.weight) module_output.bias = torch.nn.Parameter(module.bias) module_output.running_mean = module.running_mean module_output.running_var = module.running_var module_output.num_batches_tracked = torch.tensor(0, dtype=torch.long, device=module.running_mean.device) for name, child in module.named_children(): module_output.add_module(name, my_convert_sync_batchnorm(child, process_group)) del module return module_output class LayoutLMv2VisualBackbone(nn.Module): def __init__(self, config): super().__init__() self.cfg = config.get_detectron2_config() meta_arch = self.cfg.MODEL.META_ARCHITECTURE model = META_ARCH_REGISTRY.get(meta_arch)(self.cfg) assert isinstance(model.backbone, detectron2.modeling.backbone.FPN) self.backbone = model.backbone assert len(self.cfg.MODEL.PIXEL_MEAN) == len(self.cfg.MODEL.PIXEL_STD) num_channels = len(self.cfg.MODEL.PIXEL_MEAN) self.register_buffer( "pixel_mean", torch.Tensor(self.cfg.MODEL.PIXEL_MEAN).view(num_channels, 1, 1), persistent=False, ) self.register_buffer( "pixel_std", torch.Tensor(self.cfg.MODEL.PIXEL_STD).view(num_channels, 1, 1), persistent=False ) self.out_feature_key = "p2" if torch.are_deterministic_algorithms_enabled(): logger.warning("using `AvgPool2d` instead of `AdaptiveAvgPool2d`") input_shape = (224, 224) backbone_stride = self.backbone.output_shape()[self.out_feature_key].stride self.pool = nn.AvgPool2d( ( math.ceil(math.ceil(input_shape[0] / backbone_stride) / config.image_feature_pool_shape[0]), math.ceil(math.ceil(input_shape[1] / backbone_stride) / config.image_feature_pool_shape[1]), ) ) else: self.pool = nn.AdaptiveAvgPool2d(config.image_feature_pool_shape[:2]) if len(config.image_feature_pool_shape) == 2: config.image_feature_pool_shape.append(self.backbone.output_shape()[self.out_feature_key].channels) assert self.backbone.output_shape()[self.out_feature_key].channels == config.image_feature_pool_shape[2] def forward(self, images): images_input = ((images if torch.is_tensor(images) else images.tensor) - self.pixel_mean) / self.pixel_std features = 
self.backbone(images_input) features = features[self.out_feature_key] features = self.pool(features).flatten(start_dim=2).transpose(1, 2).contiguous() return features def synchronize_batch_norm(self): if not ( torch.distributed.is_available() and torch.distributed.is_initialized() and torch.distributed.get_rank() > -1 ): raise RuntimeError("Make sure torch.distributed is set up properly.") self_rank = torch.distributed.get_rank() node_size = torch.cuda.device_count() world_size = torch.distributed.get_world_size() if not (world_size % node_size == 0): raise RuntimeError("Make sure the number of processes can be divided by the number of nodes") node_global_ranks = [list(range(i * node_size, (i + 1) * node_size)) for i in range(world_size // node_size)] sync_bn_groups = [ torch.distributed.new_group(ranks=node_global_ranks[i]) for i in range(world_size // node_size) ] node_rank = self_rank // node_size self.backbone = my_convert_sync_batchnorm(self.backbone, process_group=sync_bn_groups[node_rank]) LAYOUTLMV2_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`LayoutLMv2Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ LAYOUTLMV2_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `{0}`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*): Bounding boxes of each input sequence tokens. Selected in the range `[0, config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. image (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or `detectron.structures.ImageList` whose `tensors` is of shape `(batch_size, num_channels, height, width)`): Batch of document images. attention_mask (`torch.FloatTensor` of shape `{0}`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `{0}`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `{0}`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. 
Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class LayoutLMv2Pooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output @add_start_docstrings( "The bare LayoutLMv2 Model transformer outputting raw hidden-states without any specific head on top.", LAYOUTLMV2_START_DOCSTRING, ) class LayoutLMv2Model(LayoutLMv2PreTrainedModel): def __init__(self, config): requires_backends(self, "detectron2") super().__init__(config) self.config = config self.has_visual_segment_embedding = config.has_visual_segment_embedding self.embeddings = LayoutLMv2Embeddings(config) self.visual = LayoutLMv2VisualBackbone(config) self.visual_proj = nn.Linear(config.image_feature_pool_shape[-1], config.hidden_size) if self.has_visual_segment_embedding: self.visual_segment_embedding = nn.Parameter(nn.Embedding(1, config.hidden_size).weight[0]) self.visual_LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.visual_dropout = nn.Dropout(config.hidden_dropout_prob) self.encoder = LayoutLMv2Encoder(config) self.pooler = LayoutLMv2Pooler(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _calc_text_embeddings(self, input_ids, bbox, position_ids, token_type_ids, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) if inputs_embeds is None: inputs_embeds = self.embeddings.word_embeddings(input_ids) position_embeddings = self.embeddings.position_embeddings(position_ids) spatial_position_embeddings = self.embeddings._calc_spatial_position_embeddings(bbox) token_type_embeddings = self.embeddings.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + spatial_position_embeddings + token_type_embeddings embeddings = self.embeddings.LayerNorm(embeddings) embeddings = self.embeddings.dropout(embeddings) return embeddings def 
_calc_img_embeddings(self, image, bbox, position_ids): visual_embeddings = self.visual_proj(self.visual(image)) position_embeddings = self.embeddings.position_embeddings(position_ids) spatial_position_embeddings = self.embeddings._calc_spatial_position_embeddings(bbox) embeddings = visual_embeddings + position_embeddings + spatial_position_embeddings if self.has_visual_segment_embedding: embeddings += self.visual_segment_embedding embeddings = self.visual_LayerNorm(embeddings) embeddings = self.visual_dropout(embeddings) return embeddings def _calc_visual_bbox(self, image_feature_pool_shape, bbox, device, final_shape): visual_bbox_x = torch.div( torch.arange( 0, 1000 * (image_feature_pool_shape[1] + 1), 1000, device=device, dtype=bbox.dtype, ), self.config.image_feature_pool_shape[1], rounding_mode="floor", ) visual_bbox_y = torch.div( torch.arange( 0, 1000 * (self.config.image_feature_pool_shape[0] + 1), 1000, device=device, dtype=bbox.dtype, ), self.config.image_feature_pool_shape[0], rounding_mode="floor", ) visual_bbox = torch.stack( [ visual_bbox_x[:-1].repeat(image_feature_pool_shape[0], 1), visual_bbox_y[:-1].repeat(image_feature_pool_shape[1], 1).transpose(0, 1), visual_bbox_x[1:].repeat(image_feature_pool_shape[0], 1), visual_bbox_y[1:].repeat(image_feature_pool_shape[1], 1).transpose(0, 1), ], dim=-1, ).view(-1, bbox.size(-1)) visual_bbox = visual_bbox.repeat(final_shape[0], 1, 1) return visual_bbox def _get_input_shape(self, input_ids=None, inputs_embeds=None): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: return input_ids.size() elif inputs_embeds is not None: return inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") @add_start_docstrings_to_model_forward(LAYOUTLMV2_INPUTS_DOCSTRING.format("(batch_size, sequence_length)")) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, bbox: Optional[torch.LongTensor] = None, image: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Return: Examples: ```python >>> from transformers import AutoProcessor, LayoutLMv2Model, set_seed >>> from PIL import Image >>> import torch >>> from datasets import load_dataset >>> set_seed(0) >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased") >>> model = LayoutLMv2Model.from_pretrained("microsoft/layoutlmv2-base-uncased") >>> dataset = load_dataset("hf-internal-testing/fixtures_docvqa") >>> image_path = dataset["test"][0]["file"] >>> image = Image.open(image_path).convert("RGB") >>> encoding = processor(image, return_tensors="pt") >>> outputs = model(**encoding) >>> last_hidden_states = outputs.last_hidden_state >>> last_hidden_states.shape torch.Size([1, 342, 768]) ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) 
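# The forward pass below appends image_feature_pool_shape[0] * image_feature_pool_shape[1] (7 * 7 = 49 by default) visual tokens after the text tokens; the task heads later recover the text part by slicing the first seq_length positions.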
return_dict = return_dict if return_dict is not None else self.config.use_return_dict input_shape = self._get_input_shape(input_ids, inputs_embeds) device = input_ids.device if input_ids is not None else inputs_embeds.device visual_shape = list(input_shape) visual_shape[1] = self.config.image_feature_pool_shape[0] * self.config.image_feature_pool_shape[1] visual_shape = torch.Size(visual_shape) # needs a new copy of input_shape for tracing. Otherwise wrong dimensions will occur final_shape = list(self._get_input_shape(input_ids, inputs_embeds)) final_shape[1] += visual_shape[1] final_shape = torch.Size(final_shape) visual_bbox = self._calc_visual_bbox(self.config.image_feature_pool_shape, bbox, device, final_shape) final_bbox = torch.cat([bbox, visual_bbox], dim=1) if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) visual_attention_mask = torch.ones(visual_shape, device=device) final_attention_mask = torch.cat([attention_mask, visual_attention_mask], dim=1) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if position_ids is None: seq_length = input_shape[1] position_ids = self.embeddings.position_ids[:, :seq_length] position_ids = position_ids.expand(input_shape) visual_position_ids = torch.arange(0, visual_shape[1], dtype=torch.long, device=device).repeat( input_shape[0], 1 ) final_position_ids = torch.cat([position_ids, visual_position_ids], dim=1) if bbox is None: bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device) text_layout_emb = self._calc_text_embeddings( input_ids=input_ids, bbox=bbox, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, ) visual_emb = self._calc_img_embeddings( image=image, bbox=visual_bbox, position_ids=visual_position_ids, ) final_emb = torch.cat([text_layout_emb, visual_emb], dim=1) extended_attention_mask = final_attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.to(dtype=next(self.parameters()).dtype) else: head_mask = [None] * self.config.num_hidden_layers encoder_outputs = self.encoder( final_emb, extended_attention_mask, bbox=final_bbox, position_ids=final_position_ids, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings( """ LayoutLMv2 Model with a sequence classification head on top (a linear layer on top of the concatenation of the final hidden state of the [CLS] token, average-pooled initial visual embeddings and average-pooled final visual embeddings, e.g. for document image classification tasks such as the [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset. 
""", LAYOUTLMV2_START_DOCSTRING, ) class LayoutLMv2ForSequenceClassification(LayoutLMv2PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.layoutlmv2 = LayoutLMv2Model(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.layoutlmv2.embeddings.word_embeddings @add_start_docstrings_to_model_forward(LAYOUTLMV2_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, bbox: Optional[torch.LongTensor] = None, image: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: Example: ```python >>> from transformers import AutoProcessor, LayoutLMv2ForSequenceClassification, set_seed >>> from PIL import Image >>> import torch >>> from datasets import load_dataset >>> set_seed(0) >>> dataset = load_dataset("rvl_cdip", split="train", streaming=True) >>> data = next(iter(dataset)) >>> image = data["image"].convert("RGB") >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased") >>> model = LayoutLMv2ForSequenceClassification.from_pretrained( ... "microsoft/layoutlmv2-base-uncased", num_labels=dataset.info.features["label"].num_classes ... 
) >>> encoding = processor(image, return_tensors="pt") >>> sequence_label = torch.tensor([data["label"]]) >>> outputs = model(**encoding, labels=sequence_label) >>> loss, logits = outputs.loss, outputs.logits >>> predicted_idx = logits.argmax(dim=-1).item() >>> predicted_answer = dataset.info.features["label"].names[4] >>> predicted_idx, predicted_answer # results are not good without further fine-tuning (7, 'advertisement') ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device visual_shape = list(input_shape) visual_shape[1] = self.config.image_feature_pool_shape[0] * self.config.image_feature_pool_shape[1] visual_shape = torch.Size(visual_shape) final_shape = list(input_shape) final_shape[1] += visual_shape[1] final_shape = torch.Size(final_shape) visual_bbox = self.layoutlmv2._calc_visual_bbox( self.config.image_feature_pool_shape, bbox, device, final_shape ) visual_position_ids = torch.arange(0, visual_shape[1], dtype=torch.long, device=device).repeat( input_shape[0], 1 ) initial_image_embeddings = self.layoutlmv2._calc_img_embeddings( image=image, bbox=visual_bbox, position_ids=visual_position_ids, ) outputs = self.layoutlmv2( input_ids=input_ids, bbox=bbox, image=image, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] sequence_output, final_image_embeddings = outputs[0][:, :seq_length], outputs[0][:, seq_length:] cls_final_output = sequence_output[:, 0, :] # average-pool the visual embeddings pooled_initial_image_embeddings = initial_image_embeddings.mean(dim=1) pooled_final_image_embeddings = final_image_embeddings.mean(dim=1) # concatenate with cls_final_output sequence_output = torch.cat( [cls_final_output, pooled_initial_image_embeddings, pooled_final_image_embeddings], dim=1 ) sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return 
((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ LayoutLMv2 Model with a token classification head on top (a linear layer on top of the text part of the hidden states) e.g. for sequence labeling (information extraction) tasks such as [FUNSD](https://guillaumejaume.github.io/FUNSD/), [SROIE](https://rrc.cvc.uab.es/?ch=13), [CORD](https://github.com/clovaai/cord) and [Kleister-NDA](https://github.com/applicaai/kleister-nda). """, LAYOUTLMV2_START_DOCSTRING, ) class LayoutLMv2ForTokenClassification(LayoutLMv2PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.layoutlmv2 = LayoutLMv2Model(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.layoutlmv2.embeddings.word_embeddings @add_start_docstrings_to_model_forward(LAYOUTLMV2_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, bbox: Optional[torch.LongTensor] = None, image: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. Returns: Example: ```python >>> from transformers import AutoProcessor, LayoutLMv2ForTokenClassification, set_seed >>> from PIL import Image >>> from datasets import load_dataset >>> set_seed(0) >>> datasets = load_dataset("nielsr/funsd", split="test") >>> labels = datasets.features["ner_tags"].feature.names >>> id2label = {v: k for v, k in enumerate(labels)} >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased", revision="no_ocr") >>> model = LayoutLMv2ForTokenClassification.from_pretrained( ... "microsoft/layoutlmv2-base-uncased", num_labels=len(labels) ... ) >>> data = datasets[0] >>> image = Image.open(data["image_path"]).convert("RGB") >>> words = data["words"] >>> boxes = data["bboxes"] # make sure to normalize your bounding boxes >>> word_labels = data["ner_tags"] >>> encoding = processor( ... image, ... words, ... boxes=boxes, ... word_labels=word_labels, ... padding="max_length", ... truncation=True, ... return_tensors="pt", ... 
) >>> outputs = model(**encoding) >>> logits, loss = outputs.logits, outputs.loss >>> predicted_token_class_ids = logits.argmax(-1) >>> predicted_tokens_classes = [id2label[t.item()] for t in predicted_token_class_ids[0]] >>> predicted_tokens_classes[:5] # results are not good without further fine-tuning ['I-HEADER', 'I-HEADER', 'I-QUESTION', 'I-HEADER', 'I-QUESTION'] ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.layoutlmv2( input_ids=input_ids, bbox=bbox, image=image, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] # only take the text part of the output representations sequence_output = outputs[0][:, :seq_length] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ LayoutLMv2 Model with a span classification head on top for extractive question-answering tasks such as [DocVQA](https://rrc.cvc.uab.es/?ch=17) (a linear layer on top of the text part of the hidden-states output to compute `span start logits` and `span end logits`). """, LAYOUTLMV2_START_DOCSTRING, ) class LayoutLMv2ForQuestionAnswering(LayoutLMv2PreTrainedModel): def __init__(self, config, has_visual_segment_embedding=True): super().__init__(config) self.num_labels = config.num_labels config.has_visual_segment_embedding = has_visual_segment_embedding self.layoutlmv2 = LayoutLMv2Model(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.layoutlmv2.embeddings.word_embeddings @add_start_docstrings_to_model_forward(LAYOUTLMV2_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, bbox: Optional[torch.LongTensor] = None, image: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. Returns: Example: In this example below, we give the LayoutLMv2 model an image (of texts) and ask it a question. It will give us a prediction of what it thinks the answer is (the span of the answer within the texts parsed from the image). ```python >>> from transformers import AutoProcessor, LayoutLMv2ForQuestionAnswering, set_seed >>> import torch >>> from PIL import Image >>> from datasets import load_dataset >>> set_seed(0) >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased") >>> model = LayoutLMv2ForQuestionAnswering.from_pretrained("microsoft/layoutlmv2-base-uncased") >>> dataset = load_dataset("hf-internal-testing/fixtures_docvqa") >>> image_path = dataset["test"][0]["file"] >>> image = Image.open(image_path).convert("RGB") >>> question = "When is coffee break?" >>> encoding = processor(image, question, return_tensors="pt") >>> outputs = model(**encoding) >>> predicted_start_idx = outputs.start_logits.argmax(-1).item() >>> predicted_end_idx = outputs.end_logits.argmax(-1).item() >>> predicted_start_idx, predicted_end_idx (30, 191) >>> predicted_answer_tokens = encoding.input_ids.squeeze()[predicted_start_idx : predicted_end_idx + 1] >>> predicted_answer = processor.tokenizer.decode(predicted_answer_tokens) >>> predicted_answer # results are not good without further fine-tuning '44 a. m. to 12 : 25 p. m. 12 : 25 to 12 : 58 p. m. 12 : 58 to 4 : 00 p. m. 2 : 00 to 5 : 00 p. m. coffee break coffee will be served for men and women in the lobby adjacent to exhibit area. please move into exhibit area. ( exhibits open ) trrf general session ( part | ) presiding : lee a. waller trrf vice president “ introductory remarks ” lee a. waller, trrf vice presi - dent individual interviews with trrf public board members and sci - entific advisory council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public refrigerated warehousing industry is looking for. 
plus questions from' ``` ```python >>> target_start_index = torch.tensor([7]) >>> target_end_index = torch.tensor([14]) >>> outputs = model(**encoding, start_positions=target_start_index, end_positions=target_end_index) >>> predicted_answer_span_start = outputs.start_logits.argmax(-1).item() >>> predicted_answer_span_end = outputs.end_logits.argmax(-1).item() >>> predicted_answer_span_start, predicted_answer_span_end (30, 191) ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.layoutlmv2( input_ids=input_ids, bbox=bbox, image=image, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] # only take the text part of the output representations sequence_output = outputs[0][:, :seq_length] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
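# A minimal standalone sketch (assuming torch is installed and `transformers` is
# importable) of the bucketed relative-position bias used by `LayoutLMv2Encoder`
# above; inert on import, runnable when copied out as its own script.
if __name__ == "__main__":
    import torch

    from transformers.models.layoutlmv2.modeling_layoutlmv2 import relative_position_bucket

    position_ids = torch.arange(6).unsqueeze(0)  # toy (1, 6) sequence
    # memory_position - query_position, as in _calculate_1d_position_embeddings
    rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
    buckets = relative_position_bucket(rel_pos_mat, num_buckets=32, max_distance=128)
    # With bidirectional=True, non-positive offsets fill the lower half of the
    # bucket range and positive offsets the upper half; small |offset| values
    # get exact, one-bucket-per-distance ids.
    print(buckets[0])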
mavonic_private_repos/transformers/src/transformers/models/layoutlmv2/configuration_layoutlmv2.py
# coding=utf-8 # Copyright Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ LayoutLMv2 model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import is_detectron2_available, logging logger = logging.get_logger(__name__) from ..deprecated._archive_maps import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 # soft dependency if is_detectron2_available(): import detectron2 class LayoutLMv2Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`LayoutLMv2Model`]. It is used to instantiate an LayoutLMv2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LayoutLMv2 [microsoft/layoutlmv2-base-uncased](https://huggingface.co/microsoft/layoutlmv2-base-uncased) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the LayoutLMv2 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`LayoutLMv2Model`] or [`TFLayoutLMv2Model`]. hidden_size (`int`, *optional*, defaults to 768): Dimension of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`LayoutLMv2Model`] or [`TFLayoutLMv2Model`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. 
max_2d_position_embeddings (`int`, *optional*, defaults to 1024): The maximum value that the 2D position embedding might ever be used with. Typically set this to something large just in case (e.g., 1024). max_rel_pos (`int`, *optional*, defaults to 128): The maximum number of relative positions to be used in the self-attention mechanism. rel_pos_bins (`int`, *optional*, defaults to 32): The number of relative position bins to be used in the self-attention mechanism. fast_qkv (`bool`, *optional*, defaults to `True`): Whether or not to use a single matrix for the queries, keys, values in the self-attention layers. max_rel_2d_pos (`int`, *optional*, defaults to 256): The maximum number of relative 2D positions in the self-attention mechanism. rel_2d_pos_bins (`int`, *optional*, defaults to 64): The number of 2D relative position bins in the self-attention mechanism. image_feature_pool_shape (`List[int]`, *optional*, defaults to [7, 7, 256]): The shape of the average-pooled feature map. coordinate_size (`int`, *optional*, defaults to 128): Dimension of the coordinate embeddings. shape_size (`int`, *optional*, defaults to 128): Dimension of the width and height embeddings. has_relative_attention_bias (`bool`, *optional*, defaults to `True`): Whether or not to use a relative attention bias in the self-attention mechanism. has_spatial_attention_bias (`bool`, *optional*, defaults to `True`): Whether or not to use a spatial attention bias in the self-attention mechanism. has_visual_segment_embedding (`bool`, *optional*, defaults to `False`): Whether or not to add visual segment embeddings. detectron2_config_args (`dict`, *optional*): Dictionary containing the configuration arguments of the Detectron2 visual backbone. Refer to [this file](https://github.com/microsoft/unilm/blob/master/layoutlmft/layoutlmft/models/layoutlmv2/detectron2_config.py) for details regarding default values. 
Example: ```python >>> from transformers import LayoutLMv2Config, LayoutLMv2Model >>> # Initializing a LayoutLMv2 microsoft/layoutlmv2-base-uncased style configuration >>> configuration = LayoutLMv2Config() >>> # Initializing a model (with random weights) from the microsoft/layoutlmv2-base-uncased style configuration >>> model = LayoutLMv2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "layoutlmv2" def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, max_2d_position_embeddings=1024, max_rel_pos=128, rel_pos_bins=32, fast_qkv=True, max_rel_2d_pos=256, rel_2d_pos_bins=64, convert_sync_batchnorm=True, image_feature_pool_shape=[7, 7, 256], coordinate_size=128, shape_size=128, has_relative_attention_bias=True, has_spatial_attention_bias=True, has_visual_segment_embedding=False, detectron2_config_args=None, **kwargs, ): super().__init__( vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob, max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size, initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id, **kwargs, ) self.max_2d_position_embeddings = max_2d_position_embeddings self.max_rel_pos = max_rel_pos self.rel_pos_bins = rel_pos_bins self.fast_qkv = fast_qkv self.max_rel_2d_pos = max_rel_2d_pos self.rel_2d_pos_bins = rel_2d_pos_bins self.convert_sync_batchnorm = convert_sync_batchnorm self.image_feature_pool_shape = image_feature_pool_shape self.coordinate_size = coordinate_size self.shape_size = shape_size self.has_relative_attention_bias = has_relative_attention_bias self.has_spatial_attention_bias = has_spatial_attention_bias self.has_visual_segment_embedding = has_visual_segment_embedding self.detectron2_config_args = ( detectron2_config_args if detectron2_config_args is not None else self.get_default_detectron2_config() ) @classmethod def get_default_detectron2_config(self): return { "MODEL.MASK_ON": True, "MODEL.PIXEL_STD": [57.375, 57.120, 58.395], "MODEL.BACKBONE.NAME": "build_resnet_fpn_backbone", "MODEL.FPN.IN_FEATURES": ["res2", "res3", "res4", "res5"], "MODEL.ANCHOR_GENERATOR.SIZES": [[32], [64], [128], [256], [512]], "MODEL.RPN.IN_FEATURES": ["p2", "p3", "p4", "p5", "p6"], "MODEL.RPN.PRE_NMS_TOPK_TRAIN": 2000, "MODEL.RPN.PRE_NMS_TOPK_TEST": 1000, "MODEL.RPN.POST_NMS_TOPK_TRAIN": 1000, "MODEL.POST_NMS_TOPK_TEST": 1000, "MODEL.ROI_HEADS.NAME": "StandardROIHeads", "MODEL.ROI_HEADS.NUM_CLASSES": 5, "MODEL.ROI_HEADS.IN_FEATURES": ["p2", "p3", "p4", "p5"], "MODEL.ROI_BOX_HEAD.NAME": "FastRCNNConvFCHead", "MODEL.ROI_BOX_HEAD.NUM_FC": 2, "MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION": 14, "MODEL.ROI_MASK_HEAD.NAME": "MaskRCNNConvUpsampleHead", "MODEL.ROI_MASK_HEAD.NUM_CONV": 4, "MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION": 7, "MODEL.RESNETS.DEPTH": 101, "MODEL.RESNETS.SIZES": [[32], [64], [128], [256], [512]], "MODEL.RESNETS.ASPECT_RATIOS": [[0.5, 1.0, 2.0]], "MODEL.RESNETS.OUT_FEATURES": ["res2", "res3", "res4", "res5"], "MODEL.RESNETS.NUM_GROUPS": 32, "MODEL.RESNETS.WIDTH_PER_GROUP": 8, 
"MODEL.RESNETS.STRIDE_IN_1X1": False, } def get_detectron2_config(self): detectron2_config = detectron2.config.get_cfg() for k, v in self.detectron2_config_args.items(): attributes = k.split(".") to_set = detectron2_config for attribute in attributes[:-1]: to_set = getattr(to_set, attribute) setattr(to_set, attributes[-1], v) return detectron2_config
mavonic_private_repos/transformers/src/transformers/models/layoutlmv2/feature_extraction_layoutlmv2.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Feature extractor class for LayoutLMv2. """ import warnings from ...utils import logging from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor logger = logging.get_logger(__name__) class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use LayoutLMv2ImageProcessor instead.", FutureWarning, ) super().__init__(*args, **kwargs)
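Because the deprecated class is a pure pass-through subclass, migrating is just a rename; both constructors accept the same keyword arguments. A short sketch:

```python
>>> from transformers import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

>>> # Deprecated path: still functional, but emits a FutureWarning on instantiation.
>>> feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)

>>> # Preferred path: identical behavior, no warning.
>>> image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
```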
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for LayoutLMv2.""" from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, validate_kwargs, validate_preprocess_arguments, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract logger = logging.get_logger(__name__) def normalize_box(box, width, height): return [ int(1000 * (box[0] / width)), int(1000 * (box[1] / height)), int(1000 * (box[2] / width)), int(1000 * (box[3] / height)), ] def apply_tesseract( image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes.""" tesseract_config = tesseract_config if tesseract_config is not None else "" # apply OCR pil_image = to_pil_image(image, input_data_format=input_data_format) image_width, image_height = pil_image.size data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config) words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"] # filter empty words and corresponding coordinates irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()] words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices] left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices] top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices] width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices] height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format actual_boxes = [] for x, y, w, h in zip(left, top, width, height): actual_box = [x, y, x + w, y + h] actual_boxes.append(actual_box) # finally, normalize the bounding boxes normalized_boxes = [] for box in actual_boxes: normalized_boxes.append(normalize_box(box, image_width, image_height)) assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes" return words, normalized_boxes class LayoutLMv2ImageProcessor(BaseImageProcessor): r""" Constructs a LayoutLMv2 image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to `(size["height"], size["width"])`. 
Can be overridden by `do_resize` in `preprocess`. size (`Dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`): Size of the image after resizing. Can be overridden by `size` in `preprocess`. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the `preprocess` method. apply_ocr (`bool`, *optional*, defaults to `True`): Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes. Can be overridden by `apply_ocr` in `preprocess`. ocr_lang (`str`, *optional*): The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is used. Can be overridden by `ocr_lang` in `preprocess`. tesseract_config (`str`, *optional*, defaults to `""`): Any additional custom configuration flags that are forwarded to the `config` parameter when calling Tesseract. For example: '--psm 6'. Can be overridden by `tesseract_config` in `preprocess`. """ model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, apply_ocr: bool = True, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = "", **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 224, "width": 224} size = get_size_dict(size) self.do_resize = do_resize self.size = size self.resample = resample self.apply_ocr = apply_ocr self.ocr_lang = ocr_lang self.tesseract_config = tesseract_config self._valid_processor_keys = [ "images", "do_resize", "size", "resample", "apply_ocr", "ocr_lang", "tesseract_config", "return_tensors", "data_format", "input_data_format", ] # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized image. 
""" size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}") output_size = (size["height"], size["width"]) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, apply_ocr: bool = None, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Desired size of the output image after resizing. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PIL.Image` resampling filter. Only has an effect if `do_resize` is set to `True`. apply_ocr (`bool`, *optional*, defaults to `self.apply_ocr`): Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes. ocr_lang (`str`, *optional*, defaults to `self.ocr_lang`): The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is used. tesseract_config (`str`, *optional*, defaults to `self.tesseract_config`): Any additional custom configuration flags that are forwarded to the `config` parameter when calling Tesseract. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. """ do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size) resample = resample if resample is not None else self.resample apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config images = make_list_of_images(images) validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays. 
images = [to_numpy_array(image) for image in images] if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if apply_ocr: requires_backends(self, "pytesseract") words_batch = [] boxes_batch = [] for image in images: words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format) words_batch.append(words) boxes_batch.append(boxes) if do_resize: images = [ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] # flip color channels from RGB to BGR (as Detectron2 requires this) images = [flip_channel_order(image, input_data_format=input_data_format) for image in images] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors) if apply_ocr: data["words"] = words_batch data["boxes"] = boxes_batch return data
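A minimal usage sketch of the image processor (the input file name is hypothetical; with the default `apply_ocr=True`, `pytesseract` must be installed and the returned batch additionally carries `words` and `boxes`):

```python
>>> from PIL import Image
>>> from transformers import LayoutLMv2ImageProcessor

>>> image = Image.open("document.png").convert("RGB")  # hypothetical input file

>>> image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
>>> encoding = image_processor(image, return_tensors="pt")
>>> # Resized to 224x224, channels-first, and flipped to BGR for Detectron2.
>>> encoding["pixel_values"].shape
torch.Size([1, 3, 224, 224])
```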
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/layoutlmv2/__init__.py
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
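The practical effect of the `_LazyModule` indirection above is that the light, dependency-free classes import without pulling in any optional backend, while the guarded entries are only registered when their backend is available. A rough sketch (assuming the behavior of the top-level `transformers` package, which substitutes informative dummy objects when a backend is missing):

```python
>>> # Backend-free modules: available in a minimal install.
>>> from transformers import LayoutLMv2Config, LayoutLMv2Tokenizer, LayoutLMv2Processor

>>> # Each of these is only registered in `_import_structure` when its backend is present:
>>> from transformers import LayoutLMv2TokenizerFast   # requires `tokenizers`
>>> from transformers import LayoutLMv2ImageProcessor  # requires vision (Pillow)
>>> from transformers import LayoutLMv2Model           # requires `torch`
```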
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fast tokenization class for LayoutLMv2. It overwrites 2 methods of the slow tokenizer class, namely _batch_encode_plus and _encode_plus, in which the Rust tokenizer is used. """ import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import normalizers from ...tokenization_utils_base import ( BatchEncoding, EncodedInput, PaddingStrategy, PreTokenizedInput, TensorType, TextInput, TextInputPair, TruncationStrategy, ) from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import add_end_docstrings, logging from .tokenization_layoutlmv2 import ( LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING, LayoutLMv2Tokenizer, ) logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} class LayoutLMv2TokenizerFast(PreTrainedTokenizerFast): r""" Construct a "fast" LayoutLMv2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`): The bounding box to use for the special [CLS] token. sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`): The bounding box to use for the special [SEP] token. pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`): The bounding box to use for the special [PAD] token. pad_token_label (`int`, *optional*, defaults to -100): The label to use for padding tokens. 
Defaults to -100, which is the `ignore_index` of PyTorch's CrossEntropyLoss. only_label_first_subword (`bool`, *optional*, defaults to `True`): Whether or not to only label the first subword, in case word labels are provided. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original LayoutLMv2). """ vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = LayoutLMv2Tokenizer def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, tokenize_chinese_chars=True, strip_accents=None, **kwargs, ): super().__init__( vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, cls_token_box=cls_token_box, sep_token_box=sep_token_box, pad_token_box=pad_token_box, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, ) pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( pre_tok_state.get("lowercase", do_lower_case) != do_lower_case or pre_tok_state.get("strip_accents", strip_accents) != strip_accents ): pre_tok_class = getattr(normalizers, pre_tok_state.pop("type")) pre_tok_state["lowercase"] = do_lower_case pre_tok_state["strip_accents"] = strip_accents self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state) self.do_lower_case = do_lower_case # additional properties self.cls_token_box = cls_token_box self.sep_token_box = sep_token_box self.pad_token_box = pad_token_box self.pad_token_label = pad_token_label self.only_label_first_subword = only_label_first_subword @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def __call__( self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels. 
        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
                (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
                words).
            text_pair (`List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
                (pretokenized string).
            boxes (`List[List[int]]`, `List[List[List[int]]]`):
                Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
            word_labels (`List[int]`, `List[List[int]]`, *optional*):
                Word-level integer labels (for token classification tasks such as FUNSD, CORD).
        """

        # Input type checking for clearer error
        def _is_valid_text_input(t):
            if isinstance(t, str):
                # Strings are fine
                return True
            elif isinstance(t, (list, tuple)):
                # Lists are fine as long as they are...
                if len(t) == 0:
                    # ... empty
                    return True
                elif isinstance(t[0], str):
                    # ... list of strings
                    return True
                elif isinstance(t[0], (list, tuple)):
                    # ... list with an empty list or with a list of strings
                    return len(t[0]) == 0 or isinstance(t[0][0], str)
                else:
                    return False
            else:
                return False

        if text_pair is not None:
            # in case text + text_pair are provided, text = questions, text_pair = words
            if not _is_valid_text_input(text):
                raise ValueError("text input must be of type `str` (single example) or `List[str]` (batch of examples).")
            if not isinstance(text_pair, (list, tuple)):
                raise ValueError(
                    "Words must be of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
                )
        else:
            # in case only text is provided => must be words
            if not isinstance(text, (list, tuple)):
                raise ValueError(
                    "Words must be of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
                )

        if text_pair is not None:
            is_batched = isinstance(text, (list, tuple))
        else:
            is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))

        words = text if text_pair is None else text_pair
        if boxes is None:
            raise ValueError("You must provide corresponding bounding boxes")
        if is_batched:
            if len(words) != len(boxes):
                raise ValueError("You must provide words and boxes for an equal amount of examples")
            for words_example, boxes_example in zip(words, boxes):
                if len(words_example) != len(boxes_example):
                    raise ValueError("You must provide as many words as there are bounding boxes")
        else:
            if len(words) != len(boxes):
                raise ValueError("You must provide as many words as there are bounding boxes")

        if is_batched:
            if text_pair is not None and len(text) != len(text_pair):
                raise ValueError(
                    f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
                    f" {len(text_pair)}."
) batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text is_pair = bool(text_pair is not None) return self.batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) else: return self.encode_plus( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], ], is_pair: bool = None, boxes: Optional[List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]: batched_input = [(text, pair)] if pair else [text] encodings = self._tokenizer.encode_batch( batched_input, add_special_tokens=add_special_tokens, 
is_pretokenized=False, **kwargs ) return encodings[0].tokens @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated, `__call__` should be used instead. Args: text (`str`, `List[str]`, `List[List[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (`List[str]` or `List[int]`, *optional*): Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a list of list of strings (words of a batch of examples). """ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._encode_plus( text=text, boxes=boxes, text_pair=text_pair, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], ], is_pair: bool = None, boxes: Optional[List[List[List[int]]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, ) -> BatchEncoding: if not isinstance(batch_text_or_text_pairs, list): raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})") # Set the truncation and padding strategy and restore the initial configuration self.set_truncation_and_padding( padding_strategy=padding_strategy, 
truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, ) if is_pair: batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs] encodings = self._tokenizer.encode_batch( batch_text_or_text_pairs, add_special_tokens=add_special_tokens, is_pretokenized=True, # we set this to True as LayoutLMv2 always expects pretokenized inputs ) # Convert encoding to dict # `Tokens` has type: Tuple[ # List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]], # List[EncodingFast] # ] # with nested dimensions corresponding to batch, overflows, sequence length tokens_and_encodings = [ self._convert_encoding( encoding=encoding, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=True if word_labels is not None else return_offsets_mapping, # we use offsets to create the labels return_length=return_length, verbose=verbose, ) for encoding in encodings ] # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length) # (we say ~ because the number of overflow varies with the example in the batch) # # To match each overflowing sample with the original sample in the batch # we add an overflow_to_sample_mapping array (see below) sanitized_tokens = {} for key in tokens_and_encodings[0][0].keys(): stack = [e for item, _ in tokens_and_encodings for e in item[key]] sanitized_tokens[key] = stack sanitized_encodings = [e for _, item in tokens_and_encodings for e in item] # If returning overflowing tokens, we need to return a mapping # from the batch idx to the original sample if return_overflowing_tokens: overflow_to_sample_mapping = [] for i, (toks, _) in enumerate(tokens_and_encodings): overflow_to_sample_mapping += [i] * len(toks["input_ids"]) sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping for input_ids in sanitized_tokens["input_ids"]: self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose) # create the token boxes token_boxes = [] for batch_index in range(len(sanitized_tokens["input_ids"])): if return_overflowing_tokens: original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index] else: original_index = batch_index token_boxes_example = [] for id, sequence_id, word_id in zip( sanitized_tokens["input_ids"][batch_index], sanitized_encodings[batch_index].sequence_ids, sanitized_encodings[batch_index].word_ids, ): if word_id is not None: if is_pair and sequence_id == 0: token_boxes_example.append(self.pad_token_box) else: token_boxes_example.append(boxes[original_index][word_id]) else: if id == self.cls_token_id: token_boxes_example.append(self.cls_token_box) elif id == self.sep_token_id: token_boxes_example.append(self.sep_token_box) elif id == self.pad_token_id: token_boxes_example.append(self.pad_token_box) else: raise ValueError("Id not recognized") token_boxes.append(token_boxes_example) sanitized_tokens["bbox"] = token_boxes # optionally, create the labels if word_labels is not None: labels = [] for batch_index in range(len(sanitized_tokens["input_ids"])): if return_overflowing_tokens: original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index] else: original_index = batch_index labels_example = [] for id, offset, word_id 
in zip(
                    sanitized_tokens["input_ids"][batch_index],
                    sanitized_tokens["offset_mapping"][batch_index],
                    sanitized_encodings[batch_index].word_ids,
                ):
                    if word_id is not None:
                        if self.only_label_first_subword:
                            if offset[0] == 0:
                                # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                                labels_example.append(word_labels[original_index][word_id])
                            else:
                                labels_example.append(self.pad_token_label)
                        else:
                            labels_example.append(word_labels[original_index][word_id])
                    else:
                        labels_example.append(self.pad_token_label)
                labels.append(labels_example)

            sanitized_tokens["labels"] = labels

        # finally, remove offsets if the user didn't want them
        if not return_offsets_mapping:
            del sanitized_tokens["offset_mapping"]

        return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)

    def _encode_plus(
        self,
        text: Union[TextInput, PreTokenizedInput],
        text_pair: Optional[PreTokenizedInput] = None,
        boxes: Optional[List[List[int]]] = None,
        word_labels: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        # make it a batched input
        # 2 options:
        # 1) only text, in case text must be a list of str
        # 2) text + text_pair, in which case text = str and text_pair a list of str
        batched_input = [(text, text_pair)] if text_pair else [text]
        batched_boxes = [boxes]
        batched_word_labels = [word_labels] if word_labels is not None else None
        batched_output = self._batch_encode_plus(
            batched_input,
            is_pair=bool(text_pair is not None),
            boxes=batched_boxes,
            word_labels=batched_word_labels,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )

        # If return_tensors is None, we can remove the leading batch axis.
        # Overflowing tokens are returned as a batch of output so we keep them in this case
        if return_tensors is None and not return_overflowing_tokens:
            batched_output = BatchEncoding(
                {
                    key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
                    for key, value in batched_output.items()
                },
                batched_output.encodings,
            )

        self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose)

        return batched_output

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """
        Pad encoded inputs (on left/right and up to predefined length or max length in the batch)

        Args:
            encoded_inputs:
                Dictionary of tokenized inputs
(`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names required_input = encoded_inputs[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: max_length = len(required_input) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length # Initialize attention mask if not present. if return_attention_mask and "attention_mask" not in encoded_inputs: encoded_inputs["attention_mask"] = [1] * len(required_input) if needs_to_be_padded: difference = max_length - len(required_input) if self.padding_side == "right": if return_attention_mask: encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = ( encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference ) if "bbox" in encoded_inputs: encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference if "labels" in encoded_inputs: encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference elif self.padding_side == "left": if return_attention_mask: encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ "token_type_ids" ] if "bbox" in encoded_inputs: encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"] if "labels" in encoded_inputs: encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"] if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input else: raise ValueError("Invalid padding strategy:" + str(self.padding_side)) return encoded_inputs def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and 
adding special tokens. A BERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] if token_ids_1: output += token_ids_1 + [self.sep_token_id] return output def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files)
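A minimal end-to-end sketch of the fast tokenizer's words-plus-boxes interface (this uses the real `microsoft/layoutlmv2-base-uncased` checkpoint and therefore needs network or cache access; the boxes are made-up values on the required 0-1000 normalized scale):

```python
>>> from transformers import LayoutLMv2TokenizerFast

>>> tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")

>>> words = ["hello", "world"]
>>> boxes = [[637, 773, 693, 782], [698, 773, 733, 782]]  # one normalized box per word

>>> encoding = tokenizer(words, boxes=boxes, return_tensors="pt")
>>> sorted(encoding.keys())
['attention_mask', 'bbox', 'input_ids', 'token_type_ids']
```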
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py
# coding=utf-8 # Copyright Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization class for LayoutLMv2.""" import collections import os import sys import unicodedata from typing import Dict, List, Optional, Tuple, Union from ...tokenization_utils import AddedToken, PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...tokenization_utils_base import ( BatchEncoding, EncodedInput, PreTokenizedInput, TextInput, TextInputPair, TruncationStrategy, ) from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING = r""" add_special_tokens (`bool`, *optional*, defaults to `True`): Whether or not to encode the sequences with the special tokens relative to their model. padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. stride (`int`, *optional*, defaults to 0): If set to a number along with `max_length`, the overflowing tokens returned when `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. """ LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r""" return_token_type_ids (`bool`, *optional*): Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are token type IDs?](../glossary#token-type-ids) return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) return_overflowing_tokens (`bool`, *optional*, defaults to `False`): Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead of returning overflowing tokens. return_special_tokens_mask (`bool`, *optional*, defaults to `False`): Whether or not to return special tokens mask information. return_offsets_mapping (`bool`, *optional*, defaults to `False`): Whether or not to return `(char_start, char_end)` for each token. This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using Python's tokenizer, this method will raise `NotImplementedError`. return_length (`bool`, *optional*, defaults to `False`): Whether or not to return the lengths of the encoded inputs. verbose (`bool`, *optional*, defaults to `True`): Whether or not to print more information and warnings. **kwargs: passed to the `self.tokenize()` method Return: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. [What are input IDs?](../glossary#input-ids) - **bbox** -- List of bounding boxes to be fed to a model. - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or if *"token_type_ids"* is in `self.model_input_names`). [What are token type IDs?](../glossary#token-type-ids) - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`). [What are attention masks?](../glossary#attention-mask) - **labels** -- List of labels to be fed to a model. (when `word_labels` is specified). 
        - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
          `return_overflowing_tokens=True`).
        - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
          `return_overflowing_tokens=True`).
        - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
          regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
        - **length** -- The length of the inputs (when `return_length=True`).
"""


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    text = text.strip()
    if not text:
        return []
    tokens = text.split()
    return tokens


table = dict.fromkeys(i for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith("P"))


def subfinder(mylist, pattern):
    matches = []
    indices = []
    for i in range(len(mylist)):
        if mylist[i] == pattern[0] and mylist[i : i + len(pattern)] == pattern:
            matches.append(pattern)
            indices.append(i)
    if matches:
        return matches[0], indices[0]
    else:
        return None, 0


class LayoutLMv2Tokenizer(PreTrainedTokenizer):
    r"""
    Construct a LayoutLMv2 tokenizer. Based on WordPiece. [`LayoutLMv2Tokenizer`] can be used to turn words,
    word-level bounding boxes and optional word labels to token-level `input_ids`, `attention_mask`,
    `token_type_ids`, `bbox`, and optional `labels` (for token classification).

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods.

    [`LayoutLMv2Tokenizer`] runs end-to-end tokenization: punctuation splitting and wordpiece. It also turns the
    word-level bounding boxes into token-level bounding boxes.
    """

    vocab_files_names = VOCAB_FILES_NAMES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        do_basic_tokenize=True,
        never_split=None,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        cls_token_box=[0, 0, 0, 0],
        sep_token_box=[1000, 1000, 1000, 1000],
        pad_token_box=[0, 0, 0, 0],
        pad_token_label=-100,
        only_label_first_subword=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        model_max_length: int = 512,
        additional_special_tokens: Optional[List[str]] = None,
        **kwargs,
    ):
        sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token
        unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, special=True) if isinstance(cls_token, str) else cls_token
        mask_token = AddedToken(mask_token, special=True) if isinstance(mask_token, str) else mask_token

        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'.
To load the vocabulary from a Google pretrained"
                " model use `tokenizer = LayoutLMv2Tokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.vocab = load_vocab(vocab_file)
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(
                do_lower_case=do_lower_case,
                never_split=never_split,
                tokenize_chinese_chars=tokenize_chinese_chars,
                strip_accents=strip_accents,
            )
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))

        # additional properties
        self.cls_token_box = cls_token_box
        self.sep_token_box = sep_token_box
        self.pad_token_box = pad_token_box
        self.pad_token_label = pad_token_label
        self.only_label_first_subword = only_label_first_subword
        super().__init__(
            do_lower_case=do_lower_case,
            do_basic_tokenize=do_basic_tokenize,
            never_split=never_split,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            cls_token_box=cls_token_box,
            sep_token_box=sep_token_box,
            pad_token_box=pad_token_box,
            pad_token_label=pad_token_label,
            only_label_first_subword=only_label_first_subword,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            model_max_length=model_max_length,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

    @property
    def do_lower_case(self):
        return self.basic_tokenizer.do_lower_case

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
                # If the token is part of the never_split set
                if token in self.basic_tokenizer.never_split:
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating
        and adding special tokens. A BERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
""" if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" 
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def __call__(
        self,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
        sequences with word-level normalized bounding boxes and optional labels.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
                (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
                words).
            text_pair (`List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
                (pretokenized string).
            boxes (`List[List[int]]`, `List[List[List[int]]]`):
                Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
            word_labels (`List[int]`, `List[List[int]]`, *optional*):
                Word-level integer labels (for token classification tasks such as FUNSD, CORD).
        """

        # Input type checking for clearer error
        def _is_valid_text_input(t):
            if isinstance(t, str):
                # Strings are fine
                return True
            elif isinstance(t, (list, tuple)):
                # List are fine as long as they are...
                if len(t) == 0:
                    # ... empty
                    return True
                elif isinstance(t[0], str):
                    # ... list of strings
                    return True
                elif isinstance(t[0], (list, tuple)):
                    # ... list with an empty list or with a list of strings
                    return len(t[0]) == 0 or isinstance(t[0][0], str)
                else:
                    return False
            else:
                return False

        if text_pair is not None:
            # in case text + text_pair are provided, text = questions, text_pair = words
            if not _is_valid_text_input(text):
                raise ValueError("text input must be of type `str` (single example) or `List[str]` (batch of examples). ")
            if not isinstance(text_pair, (list, tuple)):
                raise ValueError(
                    "Words must be of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
                )
        else:
            # in case only text is provided => must be words
            if not isinstance(text, (list, tuple)):
                raise ValueError(
                    "Words must be of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
) if text_pair is not None: is_batched = isinstance(text, (list, tuple)) else: is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) words = text if text_pair is None else text_pair if boxes is None: raise ValueError("You must provide corresponding bounding boxes") if is_batched: if len(words) != len(boxes): raise ValueError("You must provide words and boxes for an equal amount of examples") for words_example, boxes_example in zip(words, boxes): if len(words_example) != len(boxes_example): raise ValueError("You must provide as many words as there are bounding boxes") else: if len(words) != len(boxes): raise ValueError("You must provide as many words as there are bounding boxes") if is_batched: if text_pair is not None and len(text) != len(text_pair): raise ValueError( f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:" f" {len(text_pair)}." ) batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text is_pair = bool(text_pair is not None) return self.batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) else: return self.encode_plus( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], ], is_pair: bool = None, boxes: Optional[List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return 
self._batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], ], is_pair: bool = None, boxes: Optional[List[List[List[int]]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." ) batch_outputs = self._batch_prepare_for_model( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose, ) return BatchEncoding(batch_outputs) @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def _batch_prepare_for_model( self, batch_text_or_text_pairs, is_pair: bool = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_length: bool = False, verbose: bool = True, ) -> BatchEncoding: """ Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. 
        Args:
            batch_text_or_text_pairs: list of text or text pairs to encode, aligned with `boxes` and, optionally,
                `word_labels`
        """

        batch_outputs = {}
        for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)):
            batch_text_or_text_pair, boxes_example = example
            outputs = self.prepare_for_model(
                batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair,
                batch_text_or_text_pair[1] if is_pair else None,
                boxes_example,
                word_labels=word_labels[idx] if word_labels is not None else None,
                add_special_tokens=add_special_tokens,
                padding=PaddingStrategy.DO_NOT_PAD.value,  # we pad in batch afterward
                truncation=truncation_strategy.value,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=None,  # we pad in batch afterward
                return_attention_mask=False,  # we pad in batch afterward
                return_token_type_ids=return_token_type_ids,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_length=return_length,
                return_tensors=None,  # We convert the whole batch to tensors at the end
                prepend_batch_axis=False,
                verbose=verbose,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                batch_outputs[key].append(value)

        batch_outputs = self.pad(
            batch_outputs,
            padding=padding_strategy.value,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)

        return batch_outputs

    @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING)
    def encode(
        self,
        text: Union[TextInput, PreTokenizedInput],
        text_pair: Optional[PreTokenizedInput] = None,
        boxes: Optional[List[List[int]]] = None,
        word_labels: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> List[int]:
        encoded_inputs = self.encode_plus(
            text=text,
            text_pair=text_pair,
            boxes=boxes,
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )

        return encoded_inputs["input_ids"]

    @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def encode_plus(
        self,
        text: Union[TextInput, PreTokenizedInput],
        text_pair: Optional[PreTokenizedInput] = None,
        boxes: Optional[List[List[int]]] = None,
        word_labels: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated, `__call__` should be used instead. Args: text (`str`, `List[str]`, `List[List[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (`List[str]` or `List[int]`, *optional*): Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a list of list of strings (words of a batch of examples). """ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._encode_plus( text=text, boxes=boxes, text_pair=text_pair, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast. 
" "More information on available tokenizers at " "https://github.com/huggingface/transformers/pull/2674" ) return self.prepare_for_model( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose, ) @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def prepare_for_model( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, prepend_batch_axis: bool = False, **kwargs, ) -> BatchEncoding: """ Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. Please Note, for *text_pair* different than `None` and *truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens. Such a combination of arguments will raise an error. Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into token-level `labels`. The word label is used for the first token of the word, while remaining tokens are labeled with -100, such that they will be ignored by the loss function. Args: text (`str`, `List[str]`, `List[List[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (`List[str]` or `List[int]`, *optional*): Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a list of list of strings (words of a batch of examples). 
""" # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) tokens = [] pair_tokens = [] token_boxes = [] pair_token_boxes = [] labels = [] if text_pair is None: if word_labels is None: # CASE 1: document image classification (training + inference) + CASE 2: token classification (inference) for word, box in zip(text, boxes): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) else: # CASE 2: token classification (training) for word, box, label in zip(text, boxes, word_labels): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) if self.only_label_first_subword: # Use the real label id for the first token of the word, and padding ids for the remaining tokens labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1)) else: labels.extend([label] * len(word_tokens)) else: # CASE 3: document visual question answering (inference) # text = question # text_pair = words tokens = self.tokenize(text) token_boxes = [self.pad_token_box for _ in range(len(tokens))] for word, box in zip(text_pair, boxes): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) pair_tokens.extend(word_tokens) pair_token_boxes.extend([box] * len(word_tokens)) # Create ids + pair_ids ids = self.convert_tokens_to_ids(tokens) pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None if ( return_overflowing_tokens and truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is not None ): raise ValueError( "Not possible to return overflowing tokens for pair of sequences with the " "`longest_first`. Please select another truncation strategy than `longest_first`, " "for instance `only_second` or `only_first`." ) # Compute the total size of the returned encodings pair = bool(pair_ids is not None) len_ids = len(ids) len_pair_ids = len(pair_ids) if pair else 0 total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0) # Truncation: Handle max sequence length overflowing_tokens = [] overflowing_token_boxes = [] overflowing_labels = [] if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length: ( ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels, ) = self.truncate_sequences( ids, token_boxes, pair_ids=pair_ids, pair_token_boxes=pair_token_boxes, labels=labels, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride, ) if return_token_type_ids and not add_special_tokens: raise ValueError( "Asking to return token_type_ids while setting add_special_tokens to False " "results in an undefined behavior. Please set add_special_tokens to True or " "set return_token_type_ids to None." 
) # Load from model defaults if return_token_type_ids is None: return_token_type_ids = "token_type_ids" in self.model_input_names if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names encoded_inputs = {} if return_overflowing_tokens: encoded_inputs["overflowing_tokens"] = overflowing_tokens encoded_inputs["overflowing_token_boxes"] = overflowing_token_boxes encoded_inputs["overflowing_labels"] = overflowing_labels encoded_inputs["num_truncated_tokens"] = total_len - max_length # Add special tokens if add_special_tokens: sequence = self.build_inputs_with_special_tokens(ids, pair_ids) token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) token_boxes = [self.cls_token_box] + token_boxes + [self.sep_token_box] if pair_token_boxes: pair_token_boxes = pair_token_boxes + [self.sep_token_box] if labels: labels = [self.pad_token_label] + labels + [self.pad_token_label] else: sequence = ids + pair_ids if pair else ids token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else []) # Build output dictionary encoded_inputs["input_ids"] = sequence encoded_inputs["bbox"] = token_boxes + pair_token_boxes if return_token_type_ids: encoded_inputs["token_type_ids"] = token_type_ids if return_special_tokens_mask: if add_special_tokens: encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids) else: encoded_inputs["special_tokens_mask"] = [0] * len(sequence) if labels: encoded_inputs["labels"] = labels # Check lengths self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose) # Padding if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask: encoded_inputs = self.pad( encoded_inputs, max_length=max_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) if return_length: encoded_inputs["length"] = len(encoded_inputs["input_ids"]) batch_outputs = BatchEncoding( encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis ) return batch_outputs def truncate_sequences( self, ids: List[int], token_boxes: List[List[int]], pair_ids: Optional[List[int]] = None, pair_token_boxes: Optional[List[List[int]]] = None, labels: Optional[List[int]] = None, num_tokens_to_remove: int = 0, truncation_strategy: Union[str, TruncationStrategy] = "longest_first", stride: int = 0, ) -> Tuple[List[int], List[int], List[int]]: """ Truncates a sequence pair in-place following the strategy. Args: ids (`List[int]`): Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. token_boxes (`List[List[int]]`): Bounding boxes of the first sequence. pair_ids (`List[int]`, *optional*): Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. pair_token_boxes (`List[List[int]]`, *optional*): Bounding boxes of the second sequence. labels (`List[int]`, *optional*): Labels of the first sequence (for token classification tasks). num_tokens_to_remove (`int`, *optional*, defaults to 0): Number of tokens to remove using the truncation strategy. truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): The strategy to follow for truncation. 
Can be: - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). stride (`int`, *optional*, defaults to 0): If set to a positive number, the overflowing tokens returned will contain some tokens from the main sequence returned. The value of this argument defines the number of additional tokens. Returns: `Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of overflowing tokens. Note: The *longest_first* strategy returns empty list of overflowing tokens if a pair of sequences (or a batch of pairs) is provided. """ if num_tokens_to_remove <= 0: return ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], [] if not isinstance(truncation_strategy, TruncationStrategy): truncation_strategy = TruncationStrategy(truncation_strategy) overflowing_tokens = [] overflowing_token_boxes = [] overflowing_labels = [] if truncation_strategy == TruncationStrategy.ONLY_FIRST or ( truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None ): if len(ids) > num_tokens_to_remove: window_len = min(len(ids), stride + num_tokens_to_remove) overflowing_tokens = ids[-window_len:] overflowing_token_boxes = token_boxes[-window_len:] overflowing_labels = labels[-window_len:] ids = ids[:-num_tokens_to_remove] token_boxes = token_boxes[:-num_tokens_to_remove] labels = labels[:-num_tokens_to_remove] else: error_msg = ( f"We need to remove {num_tokens_to_remove} to truncate the input " f"but the first sequence has a length {len(ids)}. " ) if truncation_strategy == TruncationStrategy.ONLY_FIRST: error_msg = ( error_msg + "Please select another truncation strategy than " f"{truncation_strategy}, for instance 'longest_first' or 'only_second'." ) logger.error(error_msg) elif truncation_strategy == TruncationStrategy.LONGEST_FIRST: logger.warning( "Be aware, overflowing tokens are not returned for the setting you have chosen," f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' " "truncation strategy. So the returned list will always be empty even if some " "tokens have been removed." 
) for _ in range(num_tokens_to_remove): if pair_ids is None or len(ids) > len(pair_ids): ids = ids[:-1] token_boxes = token_boxes[:-1] labels = labels[:-1] else: pair_ids = pair_ids[:-1] pair_token_boxes = pair_token_boxes[:-1] elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None: if len(pair_ids) > num_tokens_to_remove: window_len = min(len(pair_ids), stride + num_tokens_to_remove) overflowing_tokens = pair_ids[-window_len:] overflowing_token_boxes = pair_token_boxes[-window_len:] pair_ids = pair_ids[:-num_tokens_to_remove] pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove] else: logger.error( f"We need to remove {num_tokens_to_remove} to truncate the input " f"but the second sequence has a length {len(pair_ids)}. " f"Please select another truncation strategy than {truncation_strategy}, " "for instance 'longest_first' or 'only_first'." ) return ( ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels, ) def _pad( self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names required_input = encoded_inputs[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: max_length = len(required_input) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length # Initialize attention mask if not present. 
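        # Note (added for clarity): in addition to the keys handled by the base class implementation,
        # this override pads the layout-specific keys as well: "bbox" with `self.pad_token_box` and
        # "labels" with `self.pad_token_label`, on the side selected by `self.padding_side`.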
if return_attention_mask and "attention_mask" not in encoded_inputs: encoded_inputs["attention_mask"] = [1] * len(required_input) if needs_to_be_padded: difference = max_length - len(required_input) if self.padding_side == "right": if return_attention_mask: encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = ( encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference ) if "bbox" in encoded_inputs: encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference if "labels" in encoded_inputs: encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference elif self.padding_side == "left": if return_attention_mask: encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ "token_type_ids" ] if "bbox" in encoded_inputs: encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"] if "labels" in encoded_inputs: encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"] if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input else: raise ValueError("Invalid padding strategy:" + str(self.padding_side)) return encoded_inputs # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer class BasicTokenizer(object): """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). do_split_on_punc (`bool`, *optional*, defaults to `True`): In some instances we want to skip the basic punctuation splitting so that later tokenization can capture the full context of the words, such as contractions. """ def __init__( self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True, ): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents self.do_split_on_punc = do_split_on_punc def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer. 
        Args:
            never_split (`List[str]`, *optional*)
                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
                [`PreTrainedTokenizer.tokenize`]) List of tokens not to split.
        """
        # union() returns a new set by concatenating the two sets.
        never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
        text = self._clean_text(text)

        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        # prevents treating the same character with different unicode codepoints as different characters
        unicode_normalized_text = unicodedata.normalize("NFC", text)
        orig_tokens = whitespace_tokenize(unicode_normalized_text)
        split_tokens = []
        for token in orig_tokens:
            if token not in never_split:
                if self.do_lower_case:
                    token = token.lower()
                    if self.strip_accents is not False:
                        token = self._run_strip_accents(token)
                elif self.strip_accents:
                    token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token, never_split))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text, never_split=None):
        """Splits punctuation on a piece of text."""
        if not self.do_split_on_punc or (never_split is not None and text in never_split):
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xFFFD or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)


# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """
        Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
        tokenization using the given vocabulary.

        For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.

        Args:
            text: A single token or whitespace separated tokens. This should have already been passed through
                *BasicTokenizer*.

        Returns:
            A list of wordpiece tokens.
        """

        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue

            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end

            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
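

# A minimal usage sketch of the greedy longest-match-first loop above (the three-entry vocab is
# hypothetical, not shipped with any checkpoint):
#
#     vocab = {"un": 0, "##aff": 1, "##able": 2}
#     wordpiece = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
#     wordpiece.tokenize("unaffable")  # -> ["un", "##aff", "##able"]
#     wordpiece.tokenize("affable")    # -> ["[UNK]"] (no prefix of "affable" is in the vocab)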
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/mluke/__init__.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _import_structure = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_mluke"] = ["MLukeTokenizer"] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
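

# Usage sketch (requires `sentencepiece`; "studio-ousia/mluke-base" is the reference checkpoint on
# the Hub): the `_LazyModule` indirection above means `tokenization_mluke` is only imported on
# first attribute access, e.g.
#
#     from transformers import MLukeTokenizer
#     tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")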
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/mluke/tokenization_mluke.py
# coding=utf-8 # Copyright 2021 Studio Ousia and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License """ Tokenization classes for mLUKE.""" import itertools import json import os from collections.abc import Mapping from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import ( ENCODE_KWARGS_DOCSTRING, AddedToken, BatchEncoding, EncodedInput, PaddingStrategy, TensorType, TextInput, TextInputPair, TruncationStrategy, to_py_obj, ) from ...utils import add_end_docstrings, is_tf_tensor, is_torch_tensor, logging logger = logging.get_logger(__name__) EntitySpan = Tuple[int, int] EntitySpanInput = List[EntitySpan] Entity = str EntityInput = List[Entity] SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "entity_vocab_file": "entity_vocab.json"} ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r""" return_token_type_ids (`bool`, *optional*): Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are token type IDs?](../glossary#token-type-ids) return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) return_overflowing_tokens (`bool`, *optional*, defaults to `False`): Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead of returning overflowing tokens. return_special_tokens_mask (`bool`, *optional*, defaults to `False`): Whether or not to return special tokens mask information. return_offsets_mapping (`bool`, *optional*, defaults to `False`): Whether or not to return `(char_start, char_end)` for each token. This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using Python's tokenizer, this method will raise `NotImplementedError`. return_length (`bool`, *optional*, defaults to `False`): Whether or not to return the lengths of the encoded inputs. verbose (`bool`, *optional*, defaults to `True`): Whether or not to print more information and warnings. **kwargs: passed to the `self.tokenize()` method Return: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. [What are input IDs?](../glossary#input-ids) - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or if *"token_type_ids"* is in `self.model_input_names`). 
          [What are token type IDs?](../glossary#token-type-ids)

        - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
          `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).

          [What are attention masks?](../glossary#attention-mask)

        - **entity_ids** -- List of entity ids to be fed to a model.

          [What are input IDs?](../glossary#input-ids)

        - **entity_position_ids** -- List of entity positions in the input sequence to be fed to a model.
        - **entity_token_type_ids** -- List of entity token type ids to be fed to a model (when
          `return_token_type_ids=True` or if *"entity_token_type_ids"* is in `self.model_input_names`).

          [What are token type IDs?](../glossary#token-type-ids)

        - **entity_attention_mask** -- List of indices specifying which entities should be attended to by the model
          (when `return_attention_mask=True` or if *"entity_attention_mask"* is in `self.model_input_names`).

          [What are attention masks?](../glossary#attention-mask)

        - **entity_start_positions** -- List of the start positions of entities in the word token sequence (when
          `task="entity_span_classification"`).
        - **entity_end_positions** -- List of the end positions of entities in the word token sequence (when
          `task="entity_span_classification"`).
        - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
          `return_overflowing_tokens=True`).
        - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
          `return_overflowing_tokens=True`).
        - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
          regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
        - **length** -- The length of the inputs (when `return_length=True`).
"""


class MLukeTokenizer(PreTrainedTokenizer):
    """
    Adapted from [`XLMRobertaTokenizer`] and [`LukeTokenizer`]. Based on
    [SentencePiece](https://github.com/google/sentencepiece).

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        entity_vocab_file (`str`):
            Path to the entity vocabulary file.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        task (`str`, *optional*):
            Task for which you want to prepare sequences. One of `"entity_classification"`,
            `"entity_pair_classification"`, or `"entity_span_classification"`. If you specify this argument, the entity
            sequence is automatically created based on the given entity span(s).
        max_entity_length (`int`, *optional*, defaults to 32):
            The maximum length of `entity_ids`.
        max_mention_length (`int`, *optional*, defaults to 30):
            The maximum number of tokens inside an entity span.
        entity_token_1 (`str`, *optional*, defaults to `<ent>`):
            The special token used to represent an entity span in a word token sequence. This token is only used when
            `task` is set to `"entity_classification"` or `"entity_pair_classification"`.
        entity_token_2 (`str`, *optional*, defaults to `<ent2>`):
            The special token used to represent an entity span in a word token sequence. This token is only used when
            `task` is set to `"entity_pair_classification"`.
        additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
            Additional special tokens used by the tokenizer.
        sp_model_kwargs (`dict`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
            to set:

            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
                using forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.

    Attributes:
        sp_model (`SentencePieceProcessor`):
            The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        entity_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        task=None,
        max_entity_length=32,
        max_mention_length=30,
        entity_token_1="<ent>",
        entity_token_2="<ent2>",
        entity_unk_token="[UNK]",
        entity_pad_token="[PAD]",
        entity_mask_token="[MASK]",
        entity_mask2_token="[MASK2]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # The mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        # we add 2 special tokens for downstream tasks
        # for more information about lstrip and rstrip, see https://github.com/huggingface/transformers/pull/2778
        entity_token_1 = (
            AddedToken(entity_token_1, lstrip=False, rstrip=False)
            if isinstance(entity_token_1, str)
            else entity_token_1
        )
        entity_token_2 = (
            AddedToken(entity_token_2, lstrip=False, rstrip=False)
            if isinstance(entity_token_2, str)
            else entity_token_2
        )
        additional_special_tokens = kwargs.pop("additional_special_tokens", [])
        additional_special_tokens += [entity_token_1, entity_token_2]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        with open(entity_vocab_file, encoding="utf-8") as entity_vocab_handle:
            self.entity_vocab = json.load(entity_vocab_handle)
        for entity_special_token in [entity_unk_token, entity_pad_token, entity_mask_token, entity_mask2_token]:
            if entity_special_token not in self.entity_vocab:
                raise ValueError(
                    f"Specified entity special token ``{entity_special_token}`` is not found in entity_vocab. "
                    f"Probably an incorrect entity vocab file is loaded: {entity_vocab_file}."
                )
        self.entity_unk_token_id = self.entity_vocab[entity_unk_token]
        self.entity_pad_token_id = self.entity_vocab[entity_pad_token]
        self.entity_mask_token_id = self.entity_vocab[entity_mask_token]
        self.entity_mask2_token_id = self.entity_vocab[entity_mask2_token]

        self.task = task
        if task is None or task == "entity_span_classification":
            self.max_entity_length = max_entity_length
        elif task == "entity_classification":
            self.max_entity_length = 1
        elif task == "entity_pair_classification":
            self.max_entity_length = 2
        else:
            raise ValueError(
                f"Task {task} not supported. Select task from ['entity_classification', 'entity_pair_classification',"
                " 'entity_span_classification'] only."
            )

        self.max_mention_length = max_mention_length

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            task=task,
            max_entity_length=max_entity_length,
            max_mention_length=max_mention_length,
            entity_token_1=entity_token_1,
            entity_token_2=entity_token_2,
            entity_unk_token=entity_unk_token,
            entity_pad_token=entity_pad_token,
            entity_mask_token=entity_mask_token,
            entity_mask2_token=entity_mask2_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

    @property
    # Copied from transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    # Copied from transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer.get_vocab
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    # Copied from transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer._tokenize
    def _tokenize(self, text: str) -> List[str]:
        # TODO check if the t5/llama PR also applies here
        return self.sp_model.encode(text, out_type=str)

    # Copied from transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer._convert_token_to_id
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer.__call__
    def __call__(
        self,
        text: Union[TextInput, List[TextInput]],
        text_pair: Optional[Union[TextInput, List[TextInput]]] = None,
        entity_spans: Optional[Union[EntitySpanInput, List[EntitySpanInput]]] = None,
        entity_spans_pair: Optional[Union[EntitySpanInput, List[EntitySpanInput]]] = None,
        entities: Optional[Union[EntityInput, List[EntityInput]]] = None,
        entities_pair: Optional[Union[EntityInput, List[EntityInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_entity_length: Optional[int] = None,
        stride: int = 0,
        is_split_into_words: Optional[bool] = False,
        pad_to_multiple_of: Optional[int] =
None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences, depending on the task you want to prepare them for. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this tokenizer does not support tokenization based on pretokenized strings. text_pair (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this tokenizer does not support tokenization based on pretokenized strings. entity_spans (`List[Tuple[int, int]]`, `List[List[Tuple[int, int]]]`, *optional*): The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each with two integers denoting character-based start and end positions of entities. If you specify `"entity_classification"` or `"entity_pair_classification"` as the `task` argument in the constructor, the length of each sequence must be 1 or 2, respectively. If you specify `entities`, the length of each sequence must be equal to the length of each sequence of `entities`. entity_spans_pair (`List[Tuple[int, int]]`, `List[List[Tuple[int, int]]]`, *optional*): The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each with two integers denoting character-based start and end positions of entities. If you specify the `task` argument in the constructor, this argument is ignored. If you specify `entities_pair`, the length of each sequence must be equal to the length of each sequence of `entities_pair`. entities (`List[str]`, `List[List[str]]`, *optional*): The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los Angeles). This argument is ignored if you specify the `task` argument in the constructor. The length of each sequence must be equal to the length of each sequence of `entity_spans`. If you specify `entity_spans` without specifying this argument, the entity sequence or the batch of entity sequences is automatically constructed by filling it with the [MASK] entity. entities_pair (`List[str]`, `List[List[str]]`, *optional*): The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los Angeles). This argument is ignored if you specify the `task` argument in the constructor. The length of each sequence must be equal to the length of each sequence of `entity_spans_pair`. If you specify `entity_spans_pair` without specifying this argument, the entity sequence or the batch of entity sequences is automatically constructed by filling it with the [MASK] entity. max_entity_length (`int`, *optional*): The maximum length of `entity_ids`. 
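
        Example (an illustrative sketch; the checkpoint name below is an assumption, and any mLUKE checkpoint
        works the same way):

        ```python
        >>> from transformers import MLukeTokenizer

        >>> tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")  # doctest: +SKIP
        >>> text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and Afghanistan."
        >>> entity_spans = [(0, 9)]  # character-based (start, end) span of "ISO 639-3"
        >>> encoding = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")  # doctest: +SKIP
        ```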
""" # Input type checking for clearer error is_valid_single_text = isinstance(text, str) is_valid_batch_text = isinstance(text, (list, tuple)) and (len(text) == 0 or (isinstance(text[0], str))) if not (is_valid_single_text or is_valid_batch_text): raise ValueError("text input must be of type `str` (single example) or `List[str]` (batch).") is_valid_single_text_pair = isinstance(text_pair, str) is_valid_batch_text_pair = isinstance(text_pair, (list, tuple)) and ( len(text_pair) == 0 or isinstance(text_pair[0], str) ) if not (text_pair is None or is_valid_single_text_pair or is_valid_batch_text_pair): raise ValueError("text_pair input must be of type `str` (single example) or `List[str]` (batch).") is_batched = bool(isinstance(text, (list, tuple))) if is_batched: batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text if entities is None: batch_entities_or_entities_pairs = None else: batch_entities_or_entities_pairs = ( list(zip(entities, entities_pair)) if entities_pair is not None else entities ) if entity_spans is None: batch_entity_spans_or_entity_spans_pairs = None else: batch_entity_spans_or_entity_spans_pairs = ( list(zip(entity_spans, entity_spans_pair)) if entity_spans_pair is not None else entity_spans ) return self.batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, batch_entity_spans_or_entity_spans_pairs=batch_entity_spans_or_entity_spans_pairs, batch_entities_or_entities_pairs=batch_entities_or_entities_pairs, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, max_entity_length=max_entity_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) else: return self.encode_plus( text=text, text_pair=text_pair, entity_spans=entity_spans, entity_spans_pair=entity_spans_pair, entities=entities, entities_pair=entities_pair, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, max_entity_length=max_entity_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer._encode_plus def _encode_plus( self, text: Union[TextInput], text_pair: Optional[Union[TextInput]] = None, entity_spans: Optional[EntitySpanInput] = None, entity_spans_pair: Optional[EntitySpanInput] = None, entities: Optional[EntityInput] = None, entities_pair: Optional[EntityInput] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, max_entity_length: Optional[int] = None, stride: int = 0, is_split_into_words: Optional[bool] = False, pad_to_multiple_of: Optional[int] = None, return_tensors: 
Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast. " "More information on available tokenizers at " "https://github.com/huggingface/transformers/pull/2674" ) if is_split_into_words: raise NotImplementedError("is_split_into_words is not supported in this tokenizer.") ( first_ids, second_ids, first_entity_ids, second_entity_ids, first_entity_token_spans, second_entity_token_spans, ) = self._create_input_sequence( text=text, text_pair=text_pair, entities=entities, entities_pair=entities_pair, entity_spans=entity_spans, entity_spans_pair=entity_spans_pair, **kwargs, ) # prepare_for_model will create the attention_mask and token_type_ids return self.prepare_for_model( first_ids, pair_ids=second_ids, entity_ids=first_entity_ids, pair_entity_ids=second_entity_ids, entity_token_spans=first_entity_token_spans, pair_entity_token_spans=second_entity_token_spans, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, max_entity_length=max_entity_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose, ) # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer._batch_encode_plus def _batch_encode_plus( self, batch_text_or_text_pairs: Union[List[TextInput], List[TextInputPair]], batch_entity_spans_or_entity_spans_pairs: Optional[ Union[List[EntitySpanInput], List[Tuple[EntitySpanInput, EntitySpanInput]]] ] = None, batch_entities_or_entities_pairs: Optional[ Union[List[EntityInput], List[Tuple[EntityInput, EntityInput]]] ] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, max_entity_length: Optional[int] = None, stride: int = 0, is_split_into_words: Optional[bool] = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." 
) if is_split_into_words: raise NotImplementedError("is_split_into_words is not supported in this tokenizer.") # input_ids is a list of tuples (one for each example in the batch) input_ids = [] entity_ids = [] entity_token_spans = [] for index, text_or_text_pair in enumerate(batch_text_or_text_pairs): if not isinstance(text_or_text_pair, (list, tuple)): text, text_pair = text_or_text_pair, None else: text, text_pair = text_or_text_pair entities, entities_pair = None, None if batch_entities_or_entities_pairs is not None: entities_or_entities_pairs = batch_entities_or_entities_pairs[index] if entities_or_entities_pairs: if isinstance(entities_or_entities_pairs[0], str): entities, entities_pair = entities_or_entities_pairs, None else: entities, entities_pair = entities_or_entities_pairs entity_spans, entity_spans_pair = None, None if batch_entity_spans_or_entity_spans_pairs is not None: entity_spans_or_entity_spans_pairs = batch_entity_spans_or_entity_spans_pairs[index] if len(entity_spans_or_entity_spans_pairs) > 0 and isinstance( entity_spans_or_entity_spans_pairs[0], list ): entity_spans, entity_spans_pair = entity_spans_or_entity_spans_pairs else: entity_spans, entity_spans_pair = entity_spans_or_entity_spans_pairs, None ( first_ids, second_ids, first_entity_ids, second_entity_ids, first_entity_token_spans, second_entity_token_spans, ) = self._create_input_sequence( text=text, text_pair=text_pair, entities=entities, entities_pair=entities_pair, entity_spans=entity_spans, entity_spans_pair=entity_spans_pair, **kwargs, ) input_ids.append((first_ids, second_ids)) entity_ids.append((first_entity_ids, second_entity_ids)) entity_token_spans.append((first_entity_token_spans, second_entity_token_spans)) batch_outputs = self._batch_prepare_for_model( input_ids, batch_entity_ids_pairs=entity_ids, batch_entity_token_spans_pairs=entity_token_spans, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, max_entity_length=max_entity_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose, ) return BatchEncoding(batch_outputs) # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer._check_entity_input_format def _check_entity_input_format(self, entities: Optional[EntityInput], entity_spans: Optional[EntitySpanInput]): if not isinstance(entity_spans, list): raise ValueError("entity_spans should be given as a list") elif len(entity_spans) > 0 and not isinstance(entity_spans[0], tuple): raise ValueError( "entity_spans should be given as a list of tuples containing the start and end character indices" ) if entities is not None: if not isinstance(entities, list): raise ValueError("If you specify entities, they should be given as a list") if len(entities) > 0 and not isinstance(entities[0], str): raise ValueError("If you specify entities, they should be given as a list of entity names") if len(entities) != len(entity_spans): raise ValueError("If you specify entities, entities and entity_spans must be the same length") # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer._create_input_sequence def _create_input_sequence( self, text: Union[TextInput], text_pair: Optional[Union[TextInput]] = None, entities: 
Optional[EntityInput] = None, entities_pair: Optional[EntityInput] = None, entity_spans: Optional[EntitySpanInput] = None, entity_spans_pair: Optional[EntitySpanInput] = None, **kwargs, ) -> Tuple[list, list, list, list, list, list]: def get_input_ids(text): tokens = self.tokenize(text, **kwargs) return self.convert_tokens_to_ids(tokens) def get_input_ids_and_entity_token_spans(text, entity_spans): if entity_spans is None: return get_input_ids(text), None cur = 0 input_ids = [] entity_token_spans = [None] * len(entity_spans) split_char_positions = sorted(frozenset(itertools.chain(*entity_spans))) char_pos2token_pos = {} for split_char_position in split_char_positions: orig_split_char_position = split_char_position if ( split_char_position > 0 and text[split_char_position - 1] == " " ): # whitespace should be prepended to the following token split_char_position -= 1 if cur != split_char_position: input_ids += get_input_ids(text[cur:split_char_position]) cur = split_char_position char_pos2token_pos[orig_split_char_position] = len(input_ids) input_ids += get_input_ids(text[cur:]) entity_token_spans = [ (char_pos2token_pos[char_start], char_pos2token_pos[char_end]) for char_start, char_end in entity_spans ] return input_ids, entity_token_spans first_ids, second_ids = None, None first_entity_ids, second_entity_ids = None, None first_entity_token_spans, second_entity_token_spans = None, None if self.task is None: if entity_spans is None: first_ids = get_input_ids(text) else: self._check_entity_input_format(entities, entity_spans) first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans) if entities is None: first_entity_ids = [self.entity_mask_token_id] * len(entity_spans) else: first_entity_ids = [self.entity_vocab.get(entity, self.entity_unk_token_id) for entity in entities] if text_pair is not None: if entity_spans_pair is None: second_ids = get_input_ids(text_pair) else: self._check_entity_input_format(entities_pair, entity_spans_pair) second_ids, second_entity_token_spans = get_input_ids_and_entity_token_spans( text_pair, entity_spans_pair ) if entities_pair is None: second_entity_ids = [self.entity_mask_token_id] * len(entity_spans_pair) else: second_entity_ids = [ self.entity_vocab.get(entity, self.entity_unk_token_id) for entity in entities_pair ] elif self.task == "entity_classification": if not (isinstance(entity_spans, list) and len(entity_spans) == 1 and isinstance(entity_spans[0], tuple)): raise ValueError( "Entity spans should be a list containing a single tuple " "containing the start and end character indices of an entity" ) first_entity_ids = [self.entity_mask_token_id] first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans) # add special tokens to input ids entity_token_start, entity_token_end = first_entity_token_spans[0] first_ids = ( first_ids[:entity_token_end] + [self.additional_special_tokens_ids[0]] + first_ids[entity_token_end:] ) first_ids = ( first_ids[:entity_token_start] + [self.additional_special_tokens_ids[0]] + first_ids[entity_token_start:] ) first_entity_token_spans = [(entity_token_start, entity_token_end + 2)] elif self.task == "entity_pair_classification": if not ( isinstance(entity_spans, list) and len(entity_spans) == 2 and isinstance(entity_spans[0], tuple) and isinstance(entity_spans[1], tuple) ): raise ValueError( "Entity spans should be provided as a list of two tuples, " "each tuple containing the start and end character indices of an entity" ) head_span, tail_span = 
entity_spans
            first_entity_ids = [self.entity_mask_token_id, self.entity_mask2_token_id]
            first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)

            head_token_span, tail_token_span = first_entity_token_spans
            token_span_with_special_token_ids = [
                (head_token_span, self.additional_special_tokens_ids[0]),
                (tail_token_span, self.additional_special_tokens_ids[1]),
            ]
            if head_token_span[0] < tail_token_span[0]:
                first_entity_token_spans[0] = (head_token_span[0], head_token_span[1] + 2)
                first_entity_token_spans[1] = (tail_token_span[0] + 2, tail_token_span[1] + 4)
                token_span_with_special_token_ids = reversed(token_span_with_special_token_ids)
            else:
                first_entity_token_spans[0] = (head_token_span[0] + 2, head_token_span[1] + 4)
                first_entity_token_spans[1] = (tail_token_span[0], tail_token_span[1] + 2)

            # insert each pair of special tokens starting from the later span, and within a span insert at the
            # end before the start, so that indices not yet processed stay valid
            for (entity_token_start, entity_token_end), special_token_id in token_span_with_special_token_ids:
                first_ids = first_ids[:entity_token_end] + [special_token_id] + first_ids[entity_token_end:]
                first_ids = first_ids[:entity_token_start] + [special_token_id] + first_ids[entity_token_start:]

        elif self.task == "entity_span_classification":
            if not (isinstance(entity_spans, list) and len(entity_spans) > 0 and isinstance(entity_spans[0], tuple)):
                raise ValueError(
                    "Entity spans should be provided as a list of tuples, "
                    "each tuple containing the start and end character indices of an entity"
                )

            first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
            first_entity_ids = [self.entity_mask_token_id] * len(entity_spans)

        else:
            raise ValueError(f"Task {self.task} not supported")

        return (
            first_ids,
            second_ids,
            first_entity_ids,
            second_entity_ids,
            first_entity_token_spans,
            second_entity_token_spans,
        )

    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer._batch_prepare_for_model
    def _batch_prepare_for_model(
        self,
        batch_ids_pairs: List[Tuple[List[int], None]],
        batch_entity_ids_pairs: List[Tuple[Optional[List[int]], Optional[List[int]]]],
        batch_entity_token_spans_pairs: List[Tuple[Optional[List[Tuple[int, int]]], Optional[List[Tuple[int, int]]]]],
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        max_entity_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[str] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_length: bool = False,
        verbose: bool = True,
    ) -> BatchEncoding:
        """
        Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
        adds special tokens, truncates sequences if overflowing while taking into account the special tokens, and
        manages a moving window (with a user-defined stride) for overflowing tokens.

        Args:
            batch_ids_pairs: list of tokenized input ids or input ids pairs
            batch_entity_ids_pairs: list of entity ids or entity ids pairs
            batch_entity_token_spans_pairs: list of entity spans or entity spans pairs
            max_entity_length: The maximum length of the entity sequence.
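
        Example of the expected pair layout (a minimal sketch; the ids and spans below are made up for
        illustration only):

        ```python
        >>> batch_ids_pairs = [([0, 250, 2], None)]  # one example: (first_ids, second_ids or None)
        >>> batch_entity_ids_pairs = [([3], None)]  # one entity id per entity span, or None
        >>> batch_entity_token_spans_pairs = [([(1, 2)], None)]  # token-level (start, end) spans, or None
        ```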
""" batch_outputs = {} for input_ids, entity_ids, entity_token_span_pairs in zip( batch_ids_pairs, batch_entity_ids_pairs, batch_entity_token_spans_pairs ): first_ids, second_ids = input_ids first_entity_ids, second_entity_ids = entity_ids first_entity_token_spans, second_entity_token_spans = entity_token_span_pairs outputs = self.prepare_for_model( first_ids, second_ids, entity_ids=first_entity_ids, pair_entity_ids=second_entity_ids, entity_token_spans=first_entity_token_spans, pair_entity_token_spans=second_entity_token_spans, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward truncation=truncation_strategy.value, max_length=max_length, max_entity_length=max_entity_length, stride=stride, pad_to_multiple_of=None, # we pad in batch afterward return_attention_mask=False, # we pad in batch afterward return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, # We convert the whole batch to tensors at the end prepend_batch_axis=False, verbose=verbose, ) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad( batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return batch_outputs @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer.prepare_for_model def prepare_for_model( self, ids: List[int], pair_ids: Optional[List[int]] = None, entity_ids: Optional[List[int]] = None, pair_entity_ids: Optional[List[int]] = None, entity_token_spans: Optional[List[Tuple[int, int]]] = None, pair_entity_token_spans: Optional[List[Tuple[int, int]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, max_entity_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, prepend_batch_axis: bool = False, **kwargs, ) -> BatchEncoding: """ Prepares a sequence of input id, entity id and entity span, or a pair of sequences of inputs ids, entity ids, entity spans so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. Please Note, for *pair_ids* different than `None` and *truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens. Such a combination of arguments will raise an error. Args: ids (`List[int]`): Tokenized input ids of the first sequence. pair_ids (`List[int]`, *optional*): Tokenized input ids of the second sequence. entity_ids (`List[int]`, *optional*): Entity ids of the first sequence. pair_entity_ids (`List[int]`, *optional*): Entity ids of the second sequence. 
entity_token_spans (`List[Tuple[int, int]]`, *optional*): Entity spans of the first sequence. pair_entity_token_spans (`List[Tuple[int, int]]`, *optional*): Entity spans of the second sequence. max_entity_length (`int`, *optional*): The maximum length of the entity sequence. """ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) # Compute lengths pair = bool(pair_ids is not None) len_ids = len(ids) len_pair_ids = len(pair_ids) if pair else 0 if return_token_type_ids and not add_special_tokens: raise ValueError( "Asking to return token_type_ids while setting add_special_tokens to False " "results in an undefined behavior. Please set add_special_tokens to True or " "set return_token_type_ids to None." ) if ( return_overflowing_tokens and truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is not None ): raise ValueError( "Not possible to return overflowing tokens for pair of sequences with the " "`longest_first`. Please select another truncation strategy than `longest_first`, " "for instance `only_second` or `only_first`." ) # Load from model defaults if return_token_type_ids is None: return_token_type_ids = "token_type_ids" in self.model_input_names if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names encoded_inputs = {} # Compute the total size of the returned word encodings total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0) # Truncation: Handle max sequence length and max_entity_length overflowing_tokens = [] if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length: # truncate words up to max_length ids, pair_ids, overflowing_tokens = self.truncate_sequences( ids, pair_ids=pair_ids, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride, ) if return_overflowing_tokens: encoded_inputs["overflowing_tokens"] = overflowing_tokens encoded_inputs["num_truncated_tokens"] = total_len - max_length # Add special tokens if add_special_tokens: sequence = self.build_inputs_with_special_tokens(ids, pair_ids) token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) entity_token_offset = 1 # 1 * <s> token pair_entity_token_offset = len(ids) + 3 # 1 * <s> token & 2 * <sep> tokens else: sequence = ids + pair_ids if pair else ids token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else []) entity_token_offset = 0 pair_entity_token_offset = len(ids) # Build output dictionary encoded_inputs["input_ids"] = sequence if return_token_type_ids: encoded_inputs["token_type_ids"] = token_type_ids if return_special_tokens_mask: if add_special_tokens: encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids) else: encoded_inputs["special_tokens_mask"] = [0] * len(sequence) # Set max entity length if not max_entity_length: max_entity_length = self.max_entity_length if entity_ids is not None: total_entity_len = 0 num_invalid_entities = 0 valid_entity_ids = [ent_id for ent_id, span in zip(entity_ids, entity_token_spans) if span[1] <= len(ids)] valid_entity_token_spans = [span for span in entity_token_spans if span[1] <= len(ids)] total_entity_len += len(valid_entity_ids) num_invalid_entities += len(entity_ids) 
- len(valid_entity_ids) valid_pair_entity_ids, valid_pair_entity_token_spans = None, None if pair_entity_ids is not None: valid_pair_entity_ids = [ ent_id for ent_id, span in zip(pair_entity_ids, pair_entity_token_spans) if span[1] <= len(pair_ids) ] valid_pair_entity_token_spans = [span for span in pair_entity_token_spans if span[1] <= len(pair_ids)] total_entity_len += len(valid_pair_entity_ids) num_invalid_entities += len(pair_entity_ids) - len(valid_pair_entity_ids) if num_invalid_entities != 0: logger.warning( f"{num_invalid_entities} entities are ignored because their entity spans are invalid due to the" " truncation of input tokens" ) if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and total_entity_len > max_entity_length: # truncate entities up to max_entity_length valid_entity_ids, valid_pair_entity_ids, overflowing_entities = self.truncate_sequences( valid_entity_ids, pair_ids=valid_pair_entity_ids, num_tokens_to_remove=total_entity_len - max_entity_length, truncation_strategy=truncation_strategy, stride=stride, ) valid_entity_token_spans = valid_entity_token_spans[: len(valid_entity_ids)] if valid_pair_entity_token_spans is not None: valid_pair_entity_token_spans = valid_pair_entity_token_spans[: len(valid_pair_entity_ids)] if return_overflowing_tokens: encoded_inputs["overflowing_entities"] = overflowing_entities encoded_inputs["num_truncated_entities"] = total_entity_len - max_entity_length final_entity_ids = valid_entity_ids + valid_pair_entity_ids if valid_pair_entity_ids else valid_entity_ids encoded_inputs["entity_ids"] = list(final_entity_ids) entity_position_ids = [] entity_start_positions = [] entity_end_positions = [] for token_spans, offset in ( (valid_entity_token_spans, entity_token_offset), (valid_pair_entity_token_spans, pair_entity_token_offset), ): if token_spans is not None: for start, end in token_spans: start += offset end += offset position_ids = list(range(start, end))[: self.max_mention_length] position_ids += [-1] * (self.max_mention_length - end + start) entity_position_ids.append(position_ids) entity_start_positions.append(start) entity_end_positions.append(end - 1) encoded_inputs["entity_position_ids"] = entity_position_ids if self.task == "entity_span_classification": encoded_inputs["entity_start_positions"] = entity_start_positions encoded_inputs["entity_end_positions"] = entity_end_positions if return_token_type_ids: encoded_inputs["entity_token_type_ids"] = [0] * len(encoded_inputs["entity_ids"]) # Check lengths self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose) # Padding if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask: encoded_inputs = self.pad( encoded_inputs, max_length=max_length, max_entity_length=max_entity_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) if return_length: encoded_inputs["length"] = len(encoded_inputs["input_ids"]) batch_outputs = BatchEncoding( encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis ) return batch_outputs # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer.pad def pad( self, encoded_inputs: Union[ BatchEncoding, List[BatchEncoding], Dict[str, EncodedInput], Dict[str, List[EncodedInput]], List[Dict[str, EncodedInput]], ], padding: Union[bool, str, PaddingStrategy] = True, max_length: Optional[int] = None, max_entity_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, 
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        verbose: bool = True,
    ) -> BatchEncoding:
        """
        Pad a single encoded input or a batch of encoded inputs up to a predefined length or to the max sequence
        length in the batch. Padding side (left/right) and padding token ids are defined at the tokenizer level (with
        `self.padding_side`, `self.pad_token_id` and `self.pad_token_type_id`).

        .. note::

            If the `encoded_inputs` passed are dictionaries of numpy arrays, PyTorch tensors or TensorFlow tensors,
            the result will use the same type unless you provide a different tensor type with `return_tensors`. In the
            case of PyTorch tensors, however, you will lose the specific device of your tensors.

        Args:
            encoded_inputs ([`BatchEncoding`], list of [`BatchEncoding`], `Dict[str, List[int]]`, `Dict[str,
                List[List[int]]]` or `List[Dict[str, List[int]]]`):
                Tokenized inputs. Can represent one input ([`BatchEncoding`] or `Dict[str, List[int]]`) or a batch of
                tokenized inputs (list of [`BatchEncoding`], *Dict[str, List[List[int]]]* or *List[Dict[str,
                List[int]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
                collate function. Instead of `List[int]` you can have tensors (numpy arrays, PyTorch tensors or
                TensorFlow tensors); see the note above for the return type.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
                Select a strategy to pad the returned sequences (according to the model's padding side and padding
                index) among:

                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence is provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different
                  lengths).
            max_length (`int`, *optional*):
                Maximum length of the returned list and optionally padding length (see above).
            max_entity_length (`int`, *optional*):
                The maximum length of the entity sequence.
            pad_to_multiple_of (`int`, *optional*):
                If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
                the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
            return_attention_mask (`bool`, *optional*):
                Whether to return the attention mask. If left to the default, will return the attention mask according
                to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention
                masks?](../glossary#attention-mask)
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of lists of python integers. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
            verbose (`bool`, *optional*, defaults to `True`):
                Whether or not to print more information and warnings.
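
        Example (a minimal sketch of the DataLoader collate-function use mentioned above; `tokenizer` and
        `dataset` are illustrative placeholders, where `dataset` is assumed to yield un-padded encodings
        produced by this tokenizer):

        ```python
        >>> from torch.utils.data import DataLoader

        >>> def collate_fn(features):
        ...     return tokenizer.pad(features, padding=True, return_tensors="pt")

        >>> loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)  # doctest: +SKIP
        ```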
""" # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], Mapping): encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()} # The model's main input name, usually `input_ids`, has be passed for padding if self.model_input_names[0] not in encoded_inputs: raise ValueError( "You should supply an encoding or a list of encodings to this method " f"that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}" ) required_input = encoded_inputs[self.model_input_names[0]] if not required_input: if return_attention_mask: encoded_inputs["attention_mask"] = [] return encoded_inputs # If we have PyTorch/TF/NumPy tensors/arrays as inputs, we cast them as python objects # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch first_element = required_input[0] if isinstance(first_element, (list, tuple)): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. index = 0 while len(required_input[index]) == 0: index += 1 if index < len(required_input): first_element = required_input[index][0] # At this state, if `first_element` is still a list/tuple, it's an empty one so there is nothing to do. if not isinstance(first_element, (int, list, tuple)): if is_tf_tensor(first_element): return_tensors = "tf" if return_tensors is None else return_tensors elif is_torch_tensor(first_element): return_tensors = "pt" if return_tensors is None else return_tensors elif isinstance(first_element, np.ndarray): return_tensors = "np" if return_tensors is None else return_tensors else: raise ValueError( f"type of {first_element} unknown: {type(first_element)}. " "Should be one of a python, numpy, pytorch or tensorflow object." 
) for key, value in encoded_inputs.items(): encoded_inputs[key] = to_py_obj(value) # Convert padding_strategy in PaddingStrategy padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies( padding=padding, max_length=max_length, verbose=verbose ) if max_entity_length is None: max_entity_length = self.max_entity_length required_input = encoded_inputs[self.model_input_names[0]] if required_input and not isinstance(required_input[0], (list, tuple)): encoded_inputs = self._pad( encoded_inputs, max_length=max_length, max_entity_length=max_entity_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) return BatchEncoding(encoded_inputs, tensor_type=return_tensors) batch_size = len(required_input) if any(len(v) != batch_size for v in encoded_inputs.values()): raise ValueError("Some items in the output dictionary have a different batch size than others.") if padding_strategy == PaddingStrategy.LONGEST: max_length = max(len(inputs) for inputs in required_input) max_entity_length = ( max(len(inputs) for inputs in encoded_inputs["entity_ids"]) if "entity_ids" in encoded_inputs else 0 ) padding_strategy = PaddingStrategy.MAX_LENGTH batch_outputs = {} for i in range(batch_size): inputs = {k: v[i] for k, v in encoded_inputs.items()} outputs = self._pad( inputs, max_length=max_length, max_entity_length=max_entity_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) return BatchEncoding(batch_outputs, tensor_type=return_tensors) # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer._pad def _pad( self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, max_entity_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. max_entity_length: The maximum length of the entity sequence. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). 
return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ entities_provided = bool("entity_ids" in encoded_inputs) # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names if padding_strategy == PaddingStrategy.LONGEST: max_length = len(encoded_inputs["input_ids"]) if entities_provided: max_entity_length = len(encoded_inputs["entity_ids"]) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of if ( entities_provided and max_entity_length is not None and pad_to_multiple_of is not None and (max_entity_length % pad_to_multiple_of != 0) ): max_entity_length = ((max_entity_length // pad_to_multiple_of) + 1) * pad_to_multiple_of needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and ( len(encoded_inputs["input_ids"]) != max_length or (entities_provided and len(encoded_inputs["entity_ids"]) != max_entity_length) ) # Initialize attention mask if not present. if return_attention_mask and "attention_mask" not in encoded_inputs: encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"]) if entities_provided and return_attention_mask and "entity_attention_mask" not in encoded_inputs: encoded_inputs["entity_attention_mask"] = [1] * len(encoded_inputs["entity_ids"]) if needs_to_be_padded: difference = max_length - len(encoded_inputs["input_ids"]) if entities_provided: entity_difference = max_entity_length - len(encoded_inputs["entity_ids"]) if self.padding_side == "right": if return_attention_mask: encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference if entities_provided: encoded_inputs["entity_attention_mask"] = ( encoded_inputs["entity_attention_mask"] + [0] * entity_difference ) if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = encoded_inputs["token_type_ids"] + [0] * difference if entities_provided: encoded_inputs["entity_token_type_ids"] = ( encoded_inputs["entity_token_type_ids"] + [0] * entity_difference ) if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [self.pad_token_id] * difference if entities_provided: encoded_inputs["entity_ids"] = ( encoded_inputs["entity_ids"] + [self.entity_pad_token_id] * entity_difference ) encoded_inputs["entity_position_ids"] = ( encoded_inputs["entity_position_ids"] + [[-1] * self.max_mention_length] * entity_difference ) if self.task == "entity_span_classification": encoded_inputs["entity_start_positions"] = ( encoded_inputs["entity_start_positions"] + [0] * entity_difference ) encoded_inputs["entity_end_positions"] = ( encoded_inputs["entity_end_positions"] + [0] * entity_difference ) elif self.padding_side == "left": if return_attention_mask: encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] if entities_provided: encoded_inputs["entity_attention_mask"] = [0] * entity_difference + encoded_inputs[ "entity_attention_mask" ] if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = [0] * difference + encoded_inputs["token_type_ids"] if entities_provided: encoded_inputs["entity_token_type_ids"] = [0] * entity_difference + encoded_inputs[ "entity_token_type_ids" ] if "special_tokens_mask" in encoded_inputs: 
encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] encoded_inputs["input_ids"] = [self.pad_token_id] * difference + encoded_inputs["input_ids"] if entities_provided: encoded_inputs["entity_ids"] = [self.entity_pad_token_id] * entity_difference + encoded_inputs[ "entity_ids" ] encoded_inputs["entity_position_ids"] = [ [-1] * self.max_mention_length ] * entity_difference + encoded_inputs["entity_position_ids"] if self.task == "entity_span_classification": encoded_inputs["entity_start_positions"] = [0] * entity_difference + encoded_inputs[ "entity_start_positions" ] encoded_inputs["entity_end_positions"] = [0] * entity_difference + encoded_inputs[ "entity_end_positions" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side)) return encoded_inputs def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str, str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) entity_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["entity_vocab_file"] ) with open(entity_vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.entity_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n") return out_vocab_file, entity_vocab_file # Copied from transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer.build_inputs_with_special_tokens def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM-RoBERTa sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep # Copied from transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer.get_special_tokens_mask def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. 
already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] # Copied from transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer.create_token_type_ids_from_sequences def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/mluke/convert_mluke_original_pytorch_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert mLUKE checkpoint.""" import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size): # Load configuration defined in the metadata file with open(metadata_path) as metadata_file: metadata = json.load(metadata_file) config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"]) # Load in the weights from the checkpoint_path state_dict = torch.load(checkpoint_path, map_location="cpu")["module"] # Load the entity vocab file entity_vocab = load_original_entity_vocab(entity_vocab_path) # add an entry for [MASK2] entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1 config.entity_vocab_size += 1 tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"]) # Add special tokens to the token vocabulary for downstream tasks entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False) entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]}) config.vocab_size += 2 print(f"Saving tokenizer to {pytorch_dump_folder_path}") tokenizer.save_pretrained(pytorch_dump_folder_path) with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f: tokenizer_config = json.load(f) tokenizer_config["tokenizer_class"] = "MLukeTokenizer" with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f: json.dump(tokenizer_config, f) with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f: json.dump(entity_vocab, f) tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path) # Initialize the embeddings of the special tokens ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0] ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0] word_emb = state_dict["embeddings.word_embeddings.weight"] ent_emb = word_emb[ent_init_index].unsqueeze(0) ent2_emb = word_emb[ent2_init_index].unsqueeze(0) state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb]) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: decoder_bias = state_dict[bias_name] ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0) ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0) state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias]) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers): for matrix_name in ["query.weight", "query.bias"]: prefix = 
f"encoder.layer.{layer_index}.attention.self." state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name] state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name] state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"] entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0) state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb]) # add [MASK2] for 'entity_predictions.bias' entity_prediction_bias = state_dict["entity_predictions.bias"] entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0) state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias]) model = LukeForMaskedLM(config=config).eval() state_dict.pop("entity_predictions.decoder.weight") state_dict.pop("lm_head.decoder.weight") state_dict.pop("lm_head.decoder.bias") state_dict_for_hugging_face = OrderedDict() for key, value in state_dict.items(): if not (key.startswith("lm_head") or key.startswith("entity_predictions")): state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key] else: state_dict_for_hugging_face[key] = state_dict[key] missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False) if set(unexpected_keys) != {"luke.embeddings.position_ids"}: raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}") if set(missing_keys) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f"Unexpected missing_keys: {missing_keys}") model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification") text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)." span = (0, 9) encoding = tokenizer(text, entity_spans=[span], return_tensors="pt") outputs = model(**encoding) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base expected_shape = torch.Size((1, 33, 768)) expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base expected_shape = torch.Size((1, 1, 768)) expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]]) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is" f" {expected_shape}" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4): raise ValueError # Verify masked word/entity prediction tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path) text = "Tokyo is the capital of <mask>." 
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print(f"Saving PyTorch model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    # The entity vocabulary file contains one JSON object per line
    with open(entity_vocab_path) as f:
        data = [json.loads(line) for line in f]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_entity_name = f"{language}:{entity_name}"
            new_mapping[new_entity_name] = entity_id
    return new_mapping


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity vocabulary file (one JSON object per line), containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
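# A minimal invocation sketch (the script path and all file names below are illustrative
# assumptions, not artifacts shipped with this script):
#
#   python <path/to/this/script.py> \
#       --checkpoint_path mluke_base/pytorch_model.bin \
#       --metadata_path mluke_base/metadata.json \
#       --entity_vocab_path mluke_base/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base-converted \
#       --model_size base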
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/pegasus_x/__init__.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = { "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_pegasus_x"] = [ "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST", "PegasusXForConditionalGeneration", "PegasusXModel", "PegasusXPreTrainedModel", ] if TYPE_CHECKING: from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pegasus_x import ( PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
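# Minimal usage sketch (added for illustration): because of the `_LazyModule` registered
# above, importing these names (re-exported by the top-level `transformers` package) only
# loads the heavy torch-backed submodule on first attribute access.
#
#   from transformers import PegasusXConfig, PegasusXForConditionalGeneration
#
#   config = PegasusXConfig()
#   model = PegasusXForConditionalGeneration(config)  # randomly initialized weights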
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/pegasus_x/configuration_pegasus_x.py
# coding=utf-8
# Copyright 2022, Google and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PEGASUS-X model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class PegasusXConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`PegasusXModel`]. It is used to instantiate a
    PEGASUS-X model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the PEGASUS-X
    [google/pegasus-x-large](https://huggingface.co/google/pegasus-x-large) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 96103):
            Vocabulary size of the PEGASUS-X model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`PegasusXModel`].
        d_model (`int`, *optional*, defaults to 1024):
            Dimension of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 16):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 16):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimension of the "intermediate" (often named feed-forward) layer in encoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        max_position_embeddings (`int`, *optional*, defaults to 16384):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*, defaults to 1):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.
        num_global_tokens (`int`, *optional*, defaults to 32):
            Number of global tokens to use for the encoder.
        block_size (`int`, *optional*, defaults to 512):
            Block size for encoder local attention. Sequence length should be an exact multiple of block size.
            `block_size` must be a multiple of 2 if `stagger_local_blocks` is `True`.
        stagger_local_blocks (`bool`, *optional*, defaults to `True`):
            Whether to stagger every other local attention layer by half a block.

    Example:

    ```python
    >>> from transformers import PegasusXConfig, PegasusXModel

    >>> # Initializing a PEGASUS google/pegasus-x-large style configuration
    >>> configuration = PegasusXConfig()

    >>> # Initializing a model (with random weights) from the google/pegasus-x-large style configuration
    >>> model = PegasusXModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "pegasus_x"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=96103,
        max_position_embeddings=16384,
        encoder_layers=16,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=16,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=True,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        num_global_tokens=32,
        block_size=512,
        stagger_local_blocks=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.num_global_tokens = num_global_tokens
        self.block_size = block_size
        self.stagger_local_blocks = stagger_local_blocks
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
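# Quick arithmetic sketch (illustrative values, not from any published checkpoint): the
# encoder pads token sequences up to a multiple of `block_size` before local attention,
# and every local block additionally attends to all `num_global_tokens` global tokens.
#
#   config = PegasusXConfig()
#   seq_len = 1000
#   pad_len = -seq_len % config.block_size  # 24, giving a padded length of 1024
#   keys_per_block = config.block_size + config.num_global_tokens  # 512 + 32 = 544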
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/pegasus_x/modeling_pegasus_x.py
# coding=utf-8 # Copyright 2022, Google and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch PEGASUS-X model.""" import dataclasses import math from typing import Optional, Tuple, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_pegasus_x import PegasusXConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "google/pegasus-x-base" _CONFIG_FOR_DOC = "PegasusXConfig" from ..deprecated._archive_maps import PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 @dataclasses.dataclass class DimensionInfo: """Wrapper for dimension info.""" batch_size: int # batch size seq_len: int # token length block_size: int # block size num_heads: int # num heads hidden_dim: int # hidden dim dim_per_head: int # dim per head num_blocks: int # num blocks global_len: int # global length padded_seq_len: int # padded token seq length # Note: Compared to the original Flax implementation, we will pad the token representations to # a multiple of block size at the start of the encoder layers, so T=P always. # Copied from transformers.models.bart.modeling_bart.shift_tokens_right def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. """ shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids # Copied from transformers.models.bart.modeling_bart.BartScaledWordEmbedding with Bart->PegasusX class PegasusXScaledWordEmbedding(nn.Embedding): """ This module overrides nn.Embeddings' forward by multiplying with embeddings scale. 
""" def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float] = 1.0): super().__init__(num_embeddings, embedding_dim, padding_idx) self.embed_scale = embed_scale def forward(self, input_ids: torch.Tensor): return super().forward(input_ids) * self.embed_scale class PegasusXSinusoidalPositionalEmbedding(nn.Module): """This module produces sinusoidal positional embeddings of any length.""" def __init__(self, embed_dim, max_scale: int = 10000.0): super().__init__() self.embed_dim = embed_dim self.max_scale = max_scale @torch.no_grad() def forward(self, input_embeds: torch.Tensor, past_key_values_length: int = 0) -> torch.Tensor: """`input_ids_shape` is expected to be [bsz x seqlen].""" batch_size, seq_len = input_embeds.shape[:2] positions = torch.arange( past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=input_embeds.device )[:, None] pe = torch.zeros((seq_len, self.embed_dim), device=input_embeds.device, dtype=input_embeds.dtype) half_d_feature = self.embed_dim // 2 div_term = torch.exp( torch.arange(half_d_feature, device=input_embeds.device, dtype=torch.int64).type_as(input_embeds) * -(np.log(float(self.max_scale)) / (half_d_feature - 1)) ) pe[:, :half_d_feature] = torch.sin(positions * div_term) pe[:, half_d_feature:] = torch.cos(positions * div_term) return pe[None].expand(batch_size, -1, -1) # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->PegasusX class PegasusXAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[PegasusXConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value class PegasusXGlobalLocalAttention(nn.Module): """Global + Local attention. For use with Encoder only.""" def __init__( self, embed_dim: int, num_heads: int, block_size: int, dropout: float = 0.0, is_decoder: bool = False, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.block_size = block_size self.dropout = dropout self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = nn.Linear(embed_dim, embed_dim, bias=False) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=False) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=False) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=False) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, token_hidden_states: torch.Tensor, global_hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]: """Input shape: Batch x Time x Channel""" dim = DimensionInfo( batch_size=token_hidden_states.shape[0], seq_len=token_hidden_states.shape[1], block_size=self.block_size, num_heads=self.num_heads, hidden_dim=token_hidden_states.shape[2], dim_per_head=self.head_dim, num_blocks=token_hidden_states.shape[1] // self.block_size, global_len=global_hidden_states.shape[1], padded_seq_len=token_hidden_states.shape[1], ) # [batch_size, num_heads, padded_seq_len, dim_per_head] local_q = self._shape( self.q_proj(token_hidden_states) * self.scaling, seq_len=dim.padded_seq_len, bsz=dim.batch_size, ) local_k = self._shape( self.k_proj(token_hidden_states), seq_len=dim.padded_seq_len, bsz=dim.batch_size, ) local_v = self._shape( self.v_proj(token_hidden_states), seq_len=dim.padded_seq_len, bsz=dim.batch_size, ) # [batch_size, num_heads, global_len, dim_per_head] global_q = self._shape( self.q_proj(global_hidden_states) * self.scaling, seq_len=dim.global_len, bsz=dim.batch_size, ) global_k = self._shape( self.k_proj(global_hidden_states), seq_len=dim.global_len, bsz=dim.batch_size, ) global_v = self._shape( self.v_proj(global_hidden_states), seq_len=dim.global_len, bsz=dim.batch_size, ) global_attn_output, global_attn_probs = self.compute_global_attention_representations( global_q=global_q, global_k=global_k, global_v=global_v, local_k=local_k, local_v=local_v, mask=attention_mask, dim=dim, ) local_attn_output, local_attn_probs = self.compute_local_attention_representations( global_k=global_k, global_v=global_v, local_q=local_q, local_k=local_k, local_v=local_v, mask=attention_mask, dim=dim, ) # [batch_size, global_len, hidden_dim] global_attn_output = ( global_attn_output.transpose(1, 2).contiguous().view(dim.batch_size, dim.global_len, dim.hidden_dim) ) # [batch_size, global_len, hidden_dim] global_attn_output = self.out_proj(global_attn_output) # [batch_size, num_heads, block_size, num_heads, dim_per_head] local_attn_output = local_attn_output.permute(0, 2, 3, 1, 4).contiguous() # [batch_size, padded_seq_len, hidden_dim] local_attn_output = local_attn_output.view(dim.batch_size, dim.padded_seq_len, dim.hidden_dim) # [batch_size, padded_seq_len, hidden_dim] local_attn_output = self.out_proj(local_attn_output) if output_attentions: attn_probs = {"global": global_attn_probs, "local": local_attn_probs} else: attn_probs = None return local_attn_output, global_attn_output, attn_probs def compute_global_attention_representations( self, global_q, global_k, global_v, local_k, local_v, mask, dim: DimensionInfo ): """Compute attention representations for global tokens. Global tokens will attend to both global tokens as well as all input sequence tokens. Because the input sequence tokens are arranged in blocks for local attention, we unblock them and compute attention. 
Args: global_q (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]: query vectors from global tokens global_k (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]: key vectors from global tokens global_v (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]: value vectors from global tokens local_k (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]: key vectors from local tokens local_v (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]: value vectors from local tokens mask (`torch.FloatTensor`) of shape [batch_size, padded_seq_len]: attention mask dim (DimensionInfo): DimensionInfo wrapper for dimensions Returns: output of shape `[batch_sizes, length, features]`. where length will be padded to a multiple of block_size """ # [batch_size, num_heads, global_len+padded_seq_len, dim_per_head] global_and_local_k = torch.cat([global_k, local_k], dim=2) # [batch_size, num_heads, global_len+padded_seq_len, dim_per_head] global_and_local_v = torch.cat([global_v, local_v], dim=2) # [batch_size, global_len+padded_seq_len] extended_mask = nn.functional.pad(mask, pad=(dim.global_len, 0), value=0) # [batch_size, num_heads, global_len, global_len+padded_seq_len] attn_weights = torch.einsum("BHGF,BHXF->BHGX", global_q, global_and_local_k) attn_weights = attn_weights + extended_mask[:, None, None, :] attn_probs = nn.functional.softmax(attn_weights, dim=-1) attn_probs = nn.functional.dropout(attn_probs, p=self.dropout, training=self.training) # [batch_size, num_heads, global_len, F] attn_output = torch.einsum("BHGX,BHXF->BHGF", attn_probs, global_and_local_v) return attn_output, attn_probs def compute_local_attention_representations( self, global_k, global_v, local_q, local_k, local_v, mask, dim: DimensionInfo ): """Compute attention representations for local tokens. Local tokens will attend to both global tokens as well as all other tokens within the same local block. Hence, we need to tile and concatenate the global tokens to every local block Args: global_k (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]: key vectors from global tokens global_v (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]: value vectors from global tokens local_q (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]: query vectors from local tokens local_k (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]: key vectors from local tokens local_v (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]: value vectors from local tokens mask (`torch.FloatTensor`) of shape [batch_size, padded_seq_len]: attention mask dim (DimensionInfo): DimensionInfo wrapper for dimensions Returns: output of shape `[batch_sizes, length, features]`. 
where length will be padded to a multiple of block_size """ # [batch_size, num_heads, num_blocks, block_size, dim_per_head] blocked_local_q = local_q.view(dim.batch_size, dim.num_heads, dim.num_blocks, dim.block_size, dim.dim_per_head) # [batch_size, num_heads, num_blocks, block_size, dim_per_head] blocked_local_k = local_k.view(dim.batch_size, dim.num_heads, dim.num_blocks, dim.block_size, dim.dim_per_head) # [batch_size, num_heads, num_blocks, block_size, dim_per_head] blocked_local_v = local_v.view(dim.batch_size, dim.num_heads, dim.num_blocks, dim.block_size, dim.dim_per_head) # [batch_size, num_blocks, global_len+block_size] extended_mask = nn.functional.pad( mask.view(dim.batch_size, dim.num_blocks, dim.block_size), pad=(dim.global_len, 0), value=0, ) # [batch_size, num_heads, num_blocks, block_size, global_len] blocked_local2global = torch.einsum("BHNKF,BHGF->BHNKG", blocked_local_q, global_k) # [batch_size, num_heads, num_blocks, block_size, block_size] blocked_local2local = torch.einsum("BHNKF,BHNXF->BHNKX", blocked_local_q, blocked_local_k) # [batch_size, num_heads, num_blocks, block_size, global_len+block_size] attn_weights = torch.cat([blocked_local2global, blocked_local2local], dim=-1) attn_weights = attn_weights + extended_mask[:, None, :, None, :] attn_probs = nn.functional.softmax(attn_weights, dim=-1) attn_probs = nn.functional.dropout(attn_probs, p=self.dropout, training=self.training) # [batch_size, num_heads, num_blocks, block_size, global_len] local2global_attn_probs = attn_probs[:, :, :, :, : dim.global_len] # [batch_size, num_heads, num_blocks, block_size, block_size] local2local_attn_probs = attn_probs[:, :, :, :, dim.global_len :] # [batch_size, num_heads, num_blocks, block_size, dim_per_head] local2global_attn_output = torch.einsum("BHNKG,BHGF->BHNKF", local2global_attn_probs, global_v) # [batch_size, num_heads, num_blocks, block_size, dim_per_head] local2local_attn_output = torch.einsum("BHNKX,BHNXF->BHNKF", local2local_attn_probs, blocked_local_v) # [batch_size, num_heads, num_blocks, block_size, dim_per_head] attn_output = local2global_attn_output + local2local_attn_output return attn_output, attn_probs class PegasusXEncoderLayer(nn.Module): def __init__(self, stagger_blocks_this_layer: bool, config: PegasusXConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = PegasusXGlobalLocalAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, block_size=config.block_size, dropout=config.attention_dropout, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.global_self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) self.stagger_blocks_this_layer = stagger_blocks_this_layer self.block_size = config.block_size def forward( self, hidden_states: torch.Tensor, global_hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: bool = False, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape *(seq_len, batch, embed_dim)* global_hidden_states (`torch.FloatTensor`): global token hidden states *(seq_len, num_global_tokens, embed_dim)* attention_mask (`torch.FloatTensor`): attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are 
indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states global_residual = global_hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) global_hidden_states = self.global_self_attn_layer_norm(global_hidden_states) if self.stagger_blocks_this_layer: # Pad the blocks to simulate staggering hidden_states, attention_mask = self.pad_local_tokens( hidden_states=hidden_states, attention_mask=attention_mask, block_size=self.block_size ) hidden_states, global_hidden_states, attn_weights = self.self_attn( token_hidden_states=hidden_states, global_hidden_states=global_hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, ) if self.stagger_blocks_this_layer: # Undo the padding hidden_states = self.unpad_local_tokens(padded_hidden_states=hidden_states, block_size=self.block_size) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states global_hidden_states = nn.functional.dropout(global_hidden_states, p=self.dropout, training=self.training) global_hidden_states = global_residual + global_hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states global_residual = global_hidden_states global_hidden_states = self.final_layer_norm(global_hidden_states) global_hidden_states = self.activation_fn(self.fc1(global_hidden_states)) global_hidden_states = nn.functional.dropout( global_hidden_states, p=self.activation_dropout, training=self.training ) global_hidden_states = self.fc2(global_hidden_states) global_hidden_states = nn.functional.dropout(global_hidden_states, p=self.dropout, training=self.training) global_hidden_states = global_residual + global_hidden_states outputs = (hidden_states, global_hidden_states) if output_attentions: outputs += (attn_weights,) return outputs @classmethod def pad_local_tokens(cls, hidden_states, attention_mask, block_size): # hidden_states: [batch_size, seq_len, hidden_dim] pad_size = block_size // 2 mask_min_value = torch.finfo(hidden_states.dtype).min padded_hidden_states = torch.nn.functional.pad( hidden_states, pad=(0, 0, pad_size, pad_size), ) padded_mask = torch.nn.functional.pad( attention_mask, pad=(pad_size, pad_size), value=mask_min_value, ) return padded_hidden_states, padded_mask @classmethod def unpad_local_tokens(cls, padded_hidden_states, block_size): # padded_hidden_states: [batch_size, padded seq_len, hidden_dim] pad_size = block_size // 2 return padded_hidden_states[:, pad_size:-pad_size, :] class PegasusXDecoderLayer(nn.Module): def __init__(self, config: PegasusXConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = PegasusXAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, bias=False, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) 
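        # Cross-attention over the encoder outputs: the same multi-head attention module,
        # but its keys/values are projected from `encoder_hidden_states` at call time.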
self.encoder_attn = PegasusXAttention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, bias=False, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape *(seq_len, batch, embed_dim)* attention_mask (`torch.FloatTensor`): attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape *(seq_len, batch, embed_dim)* encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache: Whether to us KV cache for decoding """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = 
nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs class PegasusXPreTrainedModel(PreTrainedModel): config_class = PegasusXConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = [r"PegasusXEncoderLayer", r"PegasusXDecoderLayer"] def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) PEGASUS_X_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`PegasusXConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ PEGASUS_X_GENERATION_EXAMPLE = r""" Summarization example: ```python >>> from transformers import AutoTokenizer, PegasusXForConditionalGeneration >>> model = PegasusXForConditionalGeneration.from_pretrained("google/pegasus-x-base") >>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-x-large") >>> ARTICLE_TO_SUMMARIZE = ( ... "PG&E stated it scheduled the blackouts in response to forecasts for high winds " ... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were " ... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow." ... ) >>> inputs = tokenizer(ARTICLE_TO_SUMMARIZE, max_length=1024, return_tensors="pt") >>> # Generate Summary >>> summary_ids = model.generate(inputs["input_ids"]) >>> tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "California's largest electricity provider has turned off power to hundreds of thousands of customers." ``` """ PEGASUS_X_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) PEGASUS-X uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class PegasusXEncoder(PegasusXPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`PegasusXEncoderLayer`]. Args: config: PegasusXConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: PegasusXConfig, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = PegasusXScaledWordEmbedding( config.vocab_size, embed_dim, padding_idx, embed_scale=embed_scale ) self.embed_global = nn.Embedding(config.num_global_tokens, embed_dim) self.embed_positions = PegasusXSinusoidalPositionalEmbedding(embed_dim) self.layers = nn.ModuleList( [ PegasusXEncoderLayer( stagger_blocks_this_layer=i % 2 == 1 and config.stagger_local_blocks, config=config ) for i in range(config.encoder_layers) ] ) self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def resize_position_embeddings(self, new_num_position_embeddings: int): """ Resizes position embeddings matrix of the model if `new_num_position_embeddings != config.max_position_embeddings`. Arguments: new_num_position_embeddings (`int`): The number of new position embeddings. If position embeddings are learned, increasing the size will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will add correct vectors at the end following the position encoding algorithm, whereas reducing the size will remove vectors from the end. """ logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...") self.config.max_position_embeddings = new_num_position_embeddings self.embed_positions = PegasusXSinusoidalPositionalEmbedding(self.config.d_model) self.embed_positions.to(self.device) def get_position_embeddings(self) -> nn.Embedding: """ Returns the position embeddings matrix """ return self.embed_positions def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) embed_pos = self.embed_positions(inputs_embeds) hidden_states = inputs_embeds + embed_pos hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) batch_size, seq_len, _ = hidden_states.shape # Setup mask if attention_mask is None: attention_mask = torch.ones(*input_shape, dtype=inputs_embeds.dtype, device=inputs_embeds.device) attention_mask = attention_mask.to(dtype=hidden_states.dtype) mask_min_value = torch.finfo(hidden_states.dtype).min inverted_mask = 1.0 - attention_mask attention_mask = inverted_mask.masked_fill( inverted_mask.to(torch.bool), mask_min_value, ) # padding to block_size if seq_len % self.config.block_size != 0: pad_len = self.config.block_size - seq_len % self.config.block_size hidden_states = nn.functional.pad(hidden_states, pad=(0, 0, 0, pad_len), value=0) attention_mask = nn.functional.pad(attention_mask, pad=(0, pad_len), value=mask_min_value) # Global tokens global_hidden_states = self.embed_global( torch.arange(self.config.num_global_tokens, device=hidden_states.device)[None].expand(batch_size, -1) ) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: # skip the layer to_drop = True if to_drop: layer_outputs = (None, None) else: if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, global_hidden_states, attention_mask, output_attentions, ) else: layer_outputs = 
encoder_layer( hidden_states, global_hidden_states, attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] global_hidden_states = layer_outputs[1] if output_attentions: all_attentions = all_attentions + (layer_outputs[2],) # Undo padding-to-block-size hidden_states = hidden_states[:, :seq_len] hidden_states = self.layer_norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + ((hidden_states, global_hidden_states),) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class PegasusXDecoder(PegasusXPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`PegasusDecoderLayer`] Args: config: PegasusXConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: PegasusXConfig, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.max_target_positions = config.max_position_embeddings embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 padding_idx = config.pad_token_id if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = PegasusXScaledWordEmbedding( config.vocab_size, config.d_model, padding_idx=padding_idx, embed_scale=embed_scale ) self.embed_positions = PegasusXSinusoidalPositionalEmbedding(config.d_model) self.layers = nn.ModuleList([PegasusXDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) # embed positions positions = self.embed_positions(inputs_embeds, past_key_values_length) positions = positions.to(inputs_embeds.device) hidden_states = inputs_embeds + positions hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.gradient_checkpointing and self.training: if use_cache: 
logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, None, output_attentions, use_cache, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) hidden_states = self.layer_norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( "The bare PEGASUS-X Model outputting raw hidden-states without any specific head on top.", PEGASUS_X_START_DOCSTRING, ) class PegasusXModel(PegasusXPreTrainedModel): _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] def __init__(self, config: PegasusXConfig): super().__init__(config) vocab_size = config.vocab_size embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 padding_idx = config.pad_token_id self.shared = PegasusXScaledWordEmbedding( vocab_size, config.d_model, padding_idx=padding_idx, embed_scale=embed_scale ) self.encoder = PegasusXEncoder(config, self.shared) self.decoder = PegasusXDecoder(config, self.shared) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def resize_position_embeddings(self, new_num_position_embeddings: int): """ Resizes position embeddings matrix of the model if `new_num_position_embeddings != config.max_position_embeddings`. Arguments: new_num_position_embeddings (`int`): The number of new position embeddings. 
If position embeddings are learned, increasing the size will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will add correct vectors at the end following the position encoding algorithm, whereas reducing the size will remove vectors from the end. """ self.config.max_position_embeddings = new_num_position_embeddings self.encoder.resize_position_embeddings(new_num_position_embeddings) self.decoder.resize_position_embeddings(new_num_position_embeddings) def get_position_embeddings(self) -> Tuple[nn.Embedding]: """ Returns the position embeddings matrix """ return (self.encoder.get_position_embeddings(), self.decoder.get_position_embeddings()) @add_start_docstrings_to_model_forward(PEGASUS_X_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.Tensor] = None, decoder_attention_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None, past_key_values: Optional[Tuple[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Seq2SeqModelOutput]: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, PegasusXModel >>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-x-large") >>> model = PegasusXModel.from_pretrained("google/pegasus-x-large") >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt") >>> decoder_inputs = tokenizer("Studies show that", return_tensors="pt") >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 4, 1024] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings("The PEGASUS-X for conditional generation (e.g. summarization).", PEGASUS_X_START_DOCSTRING) class PegasusXForConditionalGeneration(PegasusXPreTrainedModel): base_model_prefix = "model" _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] def __init__(self, config: PegasusXConfig): super().__init__(config) self.model = PegasusXModel(config) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def resize_position_embeddings(self, new_num_position_embeddings: int): """ Resizes position embeddings matrix of the model if `new_num_position_embeddings != config.max_position_embeddings`. Arguments: new_num_position_embeddings (`int`): The number of new position embeddings. If position embeddings are learned, increasing the size will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will add correct vectors at the end following the position encoding algorithm, whereas reducing the size will remove vectors from the end. 
""" self.config.max_position_embeddings = new_num_position_embeddings self.model.encoder.resize_position_embeddings(new_num_position_embeddings) self.model.decoder.resize_position_embeddings(new_num_position_embeddings) def get_position_embeddings(self) -> Tuple[nn.Embedding]: """ Returns the position embeddings matrix """ return (self.model.encoder.get_position_embeddings(), self.model.decoder.get_position_embeddings()) @add_start_docstrings_to_model_forward(PEGASUS_X_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_end_docstrings(PEGASUS_X_GENERATION_EXAMPLE) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.Tensor] = None, decoder_attention_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None, past_key_values: Optional[Tuple[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Seq2SeqLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = self.lm_head(outputs[0]) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past is used if past_key_values is not None: past_length = past_key_values[0][0].shape[2] # Some generation methods 
already pass only the last input ID if decoder_input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = decoder_input_ids.shape[1] - 1 decoder_input_ids = decoder_input_ids[:, remove_prefix_length:] return { "input_ids": None, # encoder_outputs is defined. input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "use_cache": use_cache, # change this to avoid caching (presumably for debugging) } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: # cached cross_attention states don't have to be reordered -> they are always the same reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past # Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->PegasusX class PegasusXDecoderWrapper(PegasusXPreTrainedModel): """ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the [`EncoderDecoderModel`] framework. """ def __init__(self, config): super().__init__(config) self.decoder = PegasusXDecoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs)
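# --- Editor's note: hedged usage sketch, not part of the original module. ---
# The classes above expose the standard seq2seq generation path; the guard below
# shows one way to drive it end to end. The "google/pegasus-x-base" checkpoint
# name is an assumption — substitute any PEGASUS-X checkpoint you actually have
# locally or on the Hub.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("google/pegasus-x-base")
    model = PegasusXForConditionalGeneration.from_pretrained("google/pegasus-x-base")

    article = (
        "PEGASUS-X adapts PEGASUS to long inputs with staggered block-local "
        "attention and a small set of global tokens in the encoder."
    )
    inputs = tokenizer(article, return_tensors="pt")

    # generate() repeatedly calls prepare_inputs_for_generation() defined above,
    # trimming decoder_input_ids to the last token once past_key_values exist.
    summary_ids = model.generate(**inputs, max_new_tokens=32)
    print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0])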
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/timm_backbone/configuration_timm_backbone.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Configuration for Backbone models""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class TimmBackboneConfig(PretrainedConfig): r""" This is the configuration class to store the configuration for a timm backbone [`TimmBackbone`]. It is used to instantiate a timm backbone model according to the specified arguments, defining the model. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: backbone (`str`, *optional*): The timm checkpoint to load. num_channels (`int`, *optional*, defaults to 3): The number of input channels. features_only (`bool`, *optional*, defaults to `True`): Whether to output only the features or also the logits. use_pretrained_backbone (`bool`, *optional*, defaults to `True`): Whether to use a pretrained backbone. out_indices (`List[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). Will default to the last stage if unset. freeze_batch_norm_2d (`bool`, *optional*, defaults to `False`): Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. Example: ```python >>> from transformers import TimmBackboneConfig, TimmBackbone >>> # Initializing a timm backbone >>> configuration = TimmBackboneConfig("resnet50") >>> # Initializing a model from the configuration >>> model = TimmBackbone(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "timm_backbone" def __init__( self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, freeze_batch_norm_2d=False, **kwargs, ): super().__init__(**kwargs) self.backbone = backbone self.num_channels = num_channels self.features_only = features_only self.use_pretrained_backbone = use_pretrained_backbone self.use_timm_backbone = True self.out_indices = out_indices if out_indices is not None else (-1,) self.freeze_batch_norm_2d = freeze_batch_norm_2d
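# --- Editor's note: small self-contained sketch, not part of the original module. ---
# It illustrates the default handling in __init__ above: leaving `out_indices`
# unset makes the config fall back to `(-1,)`, i.e. only the backbone's last
# stage is reported, while an explicit list is stored as given.
if __name__ == "__main__":
    default_config = TimmBackboneConfig(backbone="resnet50")
    print(default_config.out_indices)  # (-1,) — last stage only

    multi_stage_config = TimmBackboneConfig(backbone="resnet50", out_indices=[1, 2, 3])
    print(multi_stage_config.out_indices)  # [1, 2, 3] — three intermediate stages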
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/timm_backbone/modeling_timm_backbone.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Tuple, Union import torch from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, is_torch_available, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig if is_timm_available(): import timm if is_torch_available(): from torch import Tensor class TimmBackbone(PreTrainedModel, BackboneMixin): """ Wrapper class for timm models to be used as backbones. This enables using the timm models interchangeably with the other models in the library keeping the same API. """ main_input_name = "pixel_values" supports_gradient_checkpointing = False config_class = TimmBackboneConfig def __init__(self, config, **kwargs): requires_backends(self, "timm") super().__init__(config) self.config = config if config.backbone is None: raise ValueError("backbone is not set in the config. Please set it to a timm model name.") if config.backbone not in timm.list_models(): raise ValueError(f"backbone {config.backbone} is not supported by timm.") if hasattr(config, "out_features") and config.out_features is not None: raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.") pretrained = getattr(config, "use_pretrained_backbone", None) if pretrained is None: raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.") # We just take the final layer by default. This matches the default for the transformers models. out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,) in_chans = kwargs.pop("in_chans", config.num_channels) self._backbone = timm.create_model( config.backbone, pretrained=pretrained, # This is currently not possible for transformer architectures. features_only=config.features_only, in_chans=in_chans, out_indices=out_indices, **kwargs, ) # Converts all `BatchNorm2d` and `SyncBatchNorm` or `BatchNormAct2d` and `SyncBatchNormAct2d` layers of provided module into `FrozenBatchNorm2d` or `FrozenBatchNormAct2d` respectively if getattr(config, "freeze_batch_norm_2d", False): self.freeze_batch_norm_2d() # These are used to control the output of the model when called. If output_hidden_states is True, then # return_layers is modified to include all layers. 
self._return_layers = { layer["module"]: str(layer["index"]) for layer in self._backbone.feature_info.get_dicts() } self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)} super()._init_backbone(config) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): requires_backends(cls, ["vision", "timm"]) from ...models.timm_backbone import TimmBackboneConfig config = kwargs.pop("config", TimmBackboneConfig()) use_timm = kwargs.pop("use_timm_backbone", True) if not use_timm: raise ValueError("use_timm_backbone must be True for timm backbones") num_channels = kwargs.pop("num_channels", config.num_channels) features_only = kwargs.pop("features_only", config.features_only) use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone) out_indices = kwargs.pop("out_indices", config.out_indices) config = TimmBackboneConfig( backbone=pretrained_model_name_or_path, num_channels=num_channels, features_only=features_only, use_pretrained_backbone=use_pretrained_backbone, out_indices=out_indices, ) return super()._from_config(config, **kwargs) def freeze_batch_norm_2d(self): timm.layers.freeze_batch_norm_2d(self._backbone) def unfreeze_batch_norm_2d(self): timm.layers.unfreeze_batch_norm_2d(self._backbone) def _init_weights(self, module): """ Empty init weights function to ensure compatibility of the class in the library. """ pass def forward( self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[BackboneOutput, Tuple[Tensor, ...]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions if output_attentions: raise ValueError("Cannot output attentions for timm backbones at the moment") if output_hidden_states: # We modify the return layers to include all the stages of the backbone self._backbone.return_layers = self._all_layers hidden_states = self._backbone(pixel_values, **kwargs) self._backbone.return_layers = self._return_layers feature_maps = tuple(hidden_states[i] for i in self.out_indices) else: feature_maps = self._backbone(pixel_values, **kwargs) hidden_states = None feature_maps = tuple(feature_maps) hidden_states = tuple(hidden_states) if hidden_states is not None else None if not return_dict: output = (feature_maps,) if output_hidden_states: output = output + (hidden_states,) return output return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
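# --- Editor's note: hedged usage sketch, not part of the original module. ---
# A minimal way to exercise the wrapper above, assuming `timm` is installed;
# "resnet18" is a standard timm model name, and `out_indices` selects which
# stages end up in `feature_maps`. Pretrained weights are downloaded unless
# `use_pretrained_backbone=False` is passed.
if __name__ == "__main__":
    backbone = TimmBackbone.from_pretrained("resnet18", out_indices=(1, 2, 3, 4))
    pixel_values = torch.randn(1, 3, 224, 224)

    with torch.no_grad():
        outputs = backbone(pixel_values)

    # One feature map per requested stage, each at a different spatial resolution.
    for feature_map in outputs.feature_maps:
        print(tuple(feature_map.shape))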
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/timm_backbone/__init__.py
# flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_timm_backbone"] = ["TimmBackbone"] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/tvlt/feature_extraction_tvlt.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for TVLT.""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging logger = logging.get_logger(__name__) class TvltFeatureExtractor(SequenceFeatureExtractor): r""" Constructs a TVLT audio feature extractor. This feature extractor can be used to prepare audio inputs for the model. This feature extractor inherits from [`FeatureExtractionMixin`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: spectrogram_length (`int`, *optional*, defaults to 2048): The time length of each audio spectrogram. num_channels (`int`, *optional*, defaults to 1): Number of audio channels. patch_size (`List[int]`, *optional*, defaults to `[16, 16]`): The patch size of audio patch embedding. feature_size (`int`, *optional*, defaults to 128): The frequency length of audio spectrogram. sampling_rate (`int`, *optional*, defaults to 44100): The sampling rate at which the audio files should be digitized, expressed in Hertz (Hz). hop_length_to_sampling_rate (`int`, *optional*, defaults to 86): Hop length is the length of the overlapping windows for the STFT used to obtain the Mel Frequency coefficients. For example, with a sampling rate of 44100 the hop length is 512, since 44100 / 512 = 86. n_fft (`int`, *optional*, defaults to 2048): Size of the Fourier transform. padding_value (`float`, *optional*, defaults to 0.0): Padding value used to pad the audio. Should correspond to silences. """ model_input_names = ["audio_values", "audio_mask"] def __init__( self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs, ): super().__init__( feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs, ) self.spectrogram_length = spectrogram_length self.num_channels = num_channels self.patch_size = patch_size self.freq_len = feature_size // self.patch_size[1] self.n_fft = n_fft self.hop_length = sampling_rate // hop_length_to_sampling_rate self.sampling_rate = sampling_rate self.padding_value = padding_value self.mel_filters = mel_filter_bank( num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney", ).T def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray: """ Compute the log-mel spectrogram of the provided audio, giving results similar to Whisper's original torch implementation within a 1e-5 tolerance.
""" log_spec = spectrogram( waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, ) log_spec = log_spec[:, :-1] log_spec = log_spec - 20.0 log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0 return log_spec def __call__( self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = True, sampling_rate: Optional[int] = None, resample: bool = False, mask_audio: bool = False, **kwargs, ) -> BatchFeature: """ Main method to prepare one or several audio(s) for the model. Args: raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`): The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not stereo, i.e. single float per timestep. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*, default to `True`): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific feature_extractor's default. [What are attention masks?](../glossary#attention-mask) <Tip> For TvltTransformer models, `attention_mask` should alwys be passed for batched inference, to avoid subtle bugs. </Tip> sampling_rate (`int`, *optional*): The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition pipeline. Current model supports sampling rate 16000 and 44100. resample (`bool`, *optional*, defaults to `False`): If the sampling rate is not matched, resample the input audio to match. mask_audio (`bool`, *optional*, defaults to `False`): Whether or not to mask input audio for MAE task. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **audio_values** -- Audio values to be fed to a model, of shape (batch_size, num_channels, height, width). - **audio_mask** -- Audio masks to be fed to a model, of shape (batch_size, num_audio_patches). """ if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( "This feature extractor is set to support sampling rate" f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled" f" with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}") is_batched = is_batched_numpy or ( isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list))) ) if is_batched: raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech] elif not is_batched and not isinstance(raw_speech, np.ndarray): raw_speech = np.asarray(raw_speech, dtype=np.float32) elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64): raw_speech = raw_speech.astype(np.float32) # always return batch if not is_batched: raw_speech = [np.asarray([raw_speech]).T] # Convert audio signals to log mel spectrograms, truncate by time axis audio_features = [ self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0], List): audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features] # Create audio attention mask max_patch_len = max( [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: audio_mask = [ (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0] for feature in audio_features ] audio_mask = np.array(audio_mask).astype(np.float32) # convert into correct format for padding max_time_len = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32) padded_audio_features = padded_audio_features * self.padding_value for i in range(len(audio_features)): feature = audio_features[i] padded_audio_features[i, :, : feature.shape[0], :] = feature # return as BatchFeature if return_attention_mask: data = {"audio_values": padded_audio_features, "audio_mask": audio_mask} else: data = {"audio_values": padded_audio_features} encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) return encoded_inputs
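# --- Editor's note: hedged usage sketch, not part of the original module. ---
# Synthetic waveforms stand in for real recordings: one second and half a second
# of random noise at the extractor's default 44100 Hz sampling rate. The shorter
# clip shows how `audio_mask` flags the padded patches.
if __name__ == "__main__":
    audio_batch = [
        np.random.randn(44100).astype(np.float32),
        np.random.randn(22050).astype(np.float32),
    ]

    feature_extractor = TvltFeatureExtractor()
    batch = feature_extractor(audio_batch, sampling_rate=44100, return_tensors="np")

    # audio_values: (batch, 1, time, 128 mel bins); audio_mask: (batch, num_patches)
    print(batch["audio_values"].shape)
    print(batch["audio_mask"].shape)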
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/tvlt/configuration_tvlt.py
# coding=utf-8 # Copyright 2023 MURGe-Lab and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TVLT model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) from ..deprecated._archive_maps import TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 class TvltConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`TvltModel`]. It is used to instantiate a TVLT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the TVLT [ZinengTang/tvlt-base](https://huggingface.co/ZinengTang/tvlt-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. spectrogram_length (`int`, *optional*, defaults to 2048): The time length of each audio spectrogram. frequency_length (`int`, *optional*, defaults to 128): The frequency length of audio spectrogram. image_patch_size (`List[int]`, *optional*, defaults to `[16, 16]`): The size (resolution) of each image patch. audio_patch_size (`List[int]`, *optional*, defaults to `[16, 16]`): The size (resolution) of each audio patch. num_image_channels (`int`, *optional*, defaults to 3): The number of input image channels. num_audio_channels (`int`, *optional*, defaults to 1): The number of input audio channels. num_frames (`int`, *optional*, defaults to 8): The maximum number of frames for an input video. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. 
qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. use_mean_pooling (`bool`, *optional*, defaults to `False`): Whether to mean pool the final hidden states instead of using the final hidden state of the [CLS] token. decoder_num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the decoder. decoder_hidden_size (`int`, *optional*, defaults to 512): Dimensionality of the decoder. decoder_num_hidden_layers (`int`, *optional*, defaults to 8): Number of hidden layers in the decoder. decoder_intermediate_size (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the decoder. pixel_mask_ratio (`float`, *optional*, defaults to 0.75): Image patch masking ratio. audio_mask_ratio (`float`, *optional*, defaults to 0.15): Audio patch masking ratio. audio_mask_type (`str`, *optional*, defaults to `"frame-level"`): Audio patch masking type, choose between "frame-level" and "patch-level". task_matching (`bool`, *optional*, defaults to `True`): Whether to use vision audio matching task in pretraining. task_mae (`bool`, *optional*, defaults to `True`): Whether to use the masked auto-encoder (MAE) in pretraining. loss_type (`str`, *optional*, defaults to `"classification"`): Loss types including regression and classification. Example: ```python >>> from transformers import TvltConfig, TvltModel >>> # Initializing a TVLT ZinengTang/tvlt-base style configuration >>> configuration = TvltConfig() >>> # Initializing a model (with random weights) from the ZinengTang/tvlt-base style configuration >>> model = TvltModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "tvlt" def __init__( self, image_size=224, spectrogram_length=2048, frequency_length=128, image_patch_size=[16, 16], audio_patch_size=[16, 16], num_image_channels=3, num_audio_channels=1, num_frames=8, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-6, qkv_bias=True, use_mean_pooling=False, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, pixel_mask_ratio=0.75, audio_mask_ratio=0.15, audio_mask_type="frame-level", task_matching=True, task_mae=True, loss_type="classification", **kwargs, ): super().__init__(**kwargs) if audio_mask_type not in ("frame-level", "patch-level"): raise ValueError( "audio_mask_type must be one of the two acceptable strategies - {'frame-level', 'patch-level'}, " f"got {audio_mask_type}" ) self.image_size = image_size self.spectrogram_length = spectrogram_length self.frequency_length = frequency_length self.image_patch_size = image_patch_size self.audio_patch_size = audio_patch_size self.num_image_channels = num_image_channels self.num_audio_channels = num_audio_channels self.num_frames = num_frames self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.use_mean_pooling = use_mean_pooling self.decoder_num_attention_heads = decoder_num_attention_heads self.decoder_hidden_size = decoder_hidden_size self.decoder_num_hidden_layers = decoder_num_hidden_layers self.decoder_intermediate_size = decoder_intermediate_size self.pixel_mask_ratio = pixel_mask_ratio self.audio_mask_ratio = audio_mask_ratio self.audio_mask_type = audio_mask_type self.task_matching = task_matching self.task_mae = task_mae self.loss_type = loss_type
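# --- Editor's note: small self-contained sketch, not part of the original module. ---
# The modeling code derives its patch counts from the fields above; this check
# reproduces that arithmetic with the default values so the sequence lengths are
# easy to verify by hand.
if __name__ == "__main__":
    config = TvltConfig()

    pixel_patches_per_frame = (config.image_size // config.image_patch_size[0]) ** 2
    audio_patches = (config.spectrogram_length // config.audio_patch_size[0]) * (
        config.frequency_length // config.audio_patch_size[1]
    )

    print(pixel_patches_per_frame)  # 196 = (224 // 16) ** 2 patches per frame
    print(audio_patches)  # 1024 = (2048 // 16) * (128 // 16) audio patches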
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/tvlt/processing_tvlt.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for TVLT. """ from ...processing_utils import ProcessorMixin class TvltProcessor(ProcessorMixin): r""" Constructs a TVLT processor which wraps a TVLT image processor and TVLT feature extractor into a single processor. [`TvltProcessor`] offers all the functionalities of [`TvltImageProcessor`] and [`TvltFeatureExtractor`]. See the docstring of [`~TvltProcessor.__call__`] for more information. Args: image_processor (`TvltImageProcessor`): An instance of [`TvltImageProcessor`]. The image processor is a required input. feature_extractor (`TvltFeatureExtractor`): An instance of [`TvltFeatureExtractor`]. The feature extractor is a required input. """ attributes = ["image_processor", "feature_extractor"] image_processor_class = "TvltImageProcessor" feature_extractor_class = "TvltFeatureExtractor" def __init__(self, image_processor, feature_extractor): super().__init__(image_processor=image_processor, feature_extractor=feature_extractor) self.image_processor = image_processor self.feature_extractor = feature_extractor def __call__( self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs, ): """ Forwards the `images` argument to TvltImageProcessor's [`~TvltImageProcessor.preprocess`] and the `audio` argument to TvltFeatureExtractor's [`~TvltFeatureExtractor.__call__`]. Please refer to the docstring of the above two methods for more information. """ if images is None and audio is None: raise ValueError("You need to specify either an `images` or `audio` input to process.") images_mixed_dict = None if images is not None: images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs) if images_mixed is not None: images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs) if audio is not None: audio_dict = self.feature_extractor( audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs ) output_dict = {} if audio is not None: output_dict.update(audio_dict) if images is not None: output_dict.update(images_dict) if images_mixed_dict is not None: output_dict.update(images_mixed_dict) return output_dict @property def model_input_names(self): image_processor_input_names = self.image_processor.model_input_names feature_extractor_input_names = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
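# --- Editor's note: hedged usage sketch, not part of the original module. ---
# It wires the two sub-processors together and runs them on synthetic inputs.
# The random arrays are placeholders for decoded video frames (8 RGB frames)
# and a one-second waveform; the exact output keys come from the sub-processors'
# model_input_names.
if __name__ == "__main__":
    import numpy as np

    from transformers import TvltFeatureExtractor, TvltImageProcessor

    processor = TvltProcessor(TvltImageProcessor(), TvltFeatureExtractor())

    video = list(np.random.randint(0, 256, (8, 224, 224, 3), dtype=np.uint8))
    waveform = np.random.randn(44100).astype(np.float32)

    inputs = processor(images=video, audio=waveform, sampling_rate=44100, return_tensors="np")
    print(sorted(inputs.keys()))  # e.g. audio_values, audio_mask, pixel_values, pixel_mask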
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/tvlt/__init__.py
# flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available, ) _import_structure = { "configuration_tvlt": ["TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP", "TvltConfig"], "feature_extraction_tvlt": ["TvltFeatureExtractor"], "processing_tvlt": ["TvltProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tvlt"] = [ "TVLT_PRETRAINED_MODEL_ARCHIVE_LIST", "TvltModel", "TvltForPreTraining", "TvltForAudioVisualClassification", "TvltPreTrainedModel", ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["image_processing_tvlt"] = ["TvltImageProcessor"] if TYPE_CHECKING: from .configuration_tvlt import TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP, TvltConfig from .processing_tvlt import TvltProcessor from .feature_extraction_tvlt import TvltFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tvlt import ( TVLT_PRETRAINED_MODEL_ARCHIVE_LIST, TvltForAudioVisualClassification, TvltForPreTraining, TvltModel, TvltPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_tvlt import TvltImageProcessor else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/tvlt/modeling_tvlt.py
# coding=utf-8 # Copyright 2023 MURGe-Lab and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch TVLT model.""" import collections.abc import math from copy import deepcopy from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, SequenceClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_tvlt import TvltConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "TvltConfig" _CHECKPOINT_FOR_DOC = "ZinengTang/tvlt-base" from ..deprecated._archive_maps import TVLT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 @dataclass class TvltModelOutput(ModelOutput): """ Class for TvltModel's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. last_pixel_hidden_state (`torch.FloatTensor` of shape `(batch_size, pixel_sequence_length, hidden_size)`): Pixel sequence of hidden-states at the output of the last layer of the model. last_audio_hidden_state (`torch.FloatTensor` of shape `(batch_size, audio_sequence_length, hidden_size)`): Audio sequence of hidden-states at the output of the last layer of the model. pixel_label_masks (`torch.FloatTensor` of shape `(batch_size, pixel_patch_length)`): Tensor indicating which pixel patches are masked (1) and which are not (0). audio_label_masks (`torch.FloatTensor` of shape `(batch_size, audio_patch_length)`): Tensor indicating which audio patches are masked (1) and which are not (0). pixel_ids_restore (`torch.LongTensor` of shape `(batch_size, pixel_patch_length)`): Tensor containing the ids permutation of pixel masking. audio_ids_restore (`torch.LongTensor` of shape `(batch_size, audio_patch_length)`): Tensor containing the ids permutation of audio masking. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: torch.FloatTensor = None last_pixel_hidden_state: torch.FloatTensor = None last_audio_hidden_state: torch.FloatTensor = None pixel_label_masks: torch.LongTensor = None audio_label_masks: torch.LongTensor = None pixel_ids_restore: torch.LongTensor = None audio_ids_restore: torch.LongTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class TvltDecoderOutput(ModelOutput): """ Class for TvltDecoder's outputs, with potential hidden states and attentions. Args: logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, patch_size ** 2 * num_channels)`): Pixel reconstruction logits. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class TvltForPreTrainingOutput(ModelOutput): """ Class for TvltForPreTraining's outputs, with potential hidden states and attentions. Args: loss (`torch.FloatTensor` of shape `(1,)`): Total pretraining loss, summing the matching and reconstruction objectives that are enabled. matching_logits (`torch.FloatTensor` of shape `(batch_size, 1)`): Matching objective logits. pixel_logits (`torch.FloatTensor` of shape `(batch_size, pixel_patch_length, image_patch_size[0] * image_patch_size[1] * num_image_channels)`): Pixel reconstruction logits. audio_logits (`torch.FloatTensor` of shape `(batch_size, audio_patch_length, audio_patch_size[0] * audio_patch_size[1])`): Audio reconstruction logits. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
""" loss: Optional[torch.FloatTensor] = None matching_logits: torch.FloatTensor = None pixel_logits: torch.FloatTensor = None audio_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None def generate_pixel_mask_noise(pixel_values, pixel_mask=None, mask_ratio=0.75): """Generate noise for audio masking.""" batch_size, seq_len = pixel_values.shape[:2] noise = torch.rand((batch_size, seq_len), device=pixel_values.device) # noise in [0, 1] len_keep = int(seq_len * (1 - mask_ratio)) return noise, len_keep def generate_audio_mask_noise(audio_values, audio_mask=None, mask_ratio=0.75, mask_type="patch-level", freq_len=8): """Generate noise for audio masking.""" batch_size, seq_len = audio_values.shape[:2] if mask_type == "frame-level": num_time_patches = seq_len // freq_len noise = ( torch.rand(batch_size, num_time_patches, device=audio_values.device) .unsqueeze(-1) .repeat(1, 1, freq_len) .view(batch_size, seq_len) ) # noise in [0, 1] elif mask_type == "patch-level": noise = torch.rand(batch_size, seq_len, device=audio_values.device) # noise in [0, 1] len_keep = int(seq_len * (1 - mask_ratio)) return noise, len_keep def random_masking(sequence, noise, len_keep, attention_masks=None): """ Perform random masking by per-sample shuffling on frame-level. Per-sample shuffling is done by argsort random noise. sequence: [batch_size, seq_len, hidden_dim], sequence """ batch_size, seq_len, hidden_dim = sequence.shape # sort noise for each sample ids_shuffle = torch.argsort(noise, dim=1) # ascend: small is keep, large is remove ids_restore = torch.argsort(ids_shuffle, dim=1) # keep the first subset ids_keep = ids_shuffle[:, :len_keep] sequence_masked = torch.gather(sequence, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, hidden_dim)) # generate the binary mask: 0 is keep, 1 is remove label_masks = torch.ones([batch_size, seq_len], device=sequence.device) label_masks[:, :len_keep] = 0 # unshuffle to get the binary mask label_masks = torch.gather(label_masks, dim=1, index=ids_restore) if attention_masks is not None: label_masks *= attention_masks attention_masks = torch.gather(attention_masks, dim=1, index=ids_keep) return sequence_masked, attention_masks, label_masks, ids_restore class TvltPixelEmbeddings(nn.Module): """Construct the patch and position embeddings.""" def __init__(self, config): super().__init__() self.patch_embeddings = TvltPixelPatchEmbeddings(config) self.num_patches_per_image = self.patch_embeddings.num_patches_per_image self.type_embed_v = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.temporal_embed = nn.Parameter(torch.zeros(1, config.num_frames, config.hidden_size)) self.pos_embed_v = nn.Parameter(torch.zeros(1, self.num_patches_per_image, config.hidden_size)) self.config = config def forward(self, pixel_values, attention_masks=None): # create patch embeddings batch_size, num_frames, num_channels, height, width = pixel_values.shape embeddings = self.patch_embeddings(pixel_values) embeddings += self.pos_embed_v.repeat(1, num_frames, 1) embeddings += torch.repeat_interleave(self.temporal_embed[:, :num_frames], self.num_patches_per_image, dim=1) embeddings += self.type_embed_v return embeddings, attention_masks class TvltAudioEmbeddings(nn.Module): """Construct the patch and position embeddings.""" def __init__(self, config): super().__init__() self.patch_embeddings = TvltAudioPatchEmbeddings(config) self.num_patches = self.patch_embeddings.num_patches self.type_embed_a = 
nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.num_freq_patches = config.frequency_length // config.audio_patch_size[1] self.pos_embed_a = nn.Parameter(torch.zeros(1, self.num_patches // self.num_freq_patches, config.hidden_size)) self.freq_embed = nn.Parameter(torch.zeros(1, self.num_freq_patches, config.hidden_size)) self.num_freq_patches = config.frequency_length // config.audio_patch_size[1] self.config = config def forward(self, audio_values, attention_masks=None): # create patch embeddings embeddings = self.patch_embeddings(audio_values) num_time_patches = embeddings.size(1) // self.num_freq_patches embeddings += self.freq_embed.repeat(1, num_time_patches, 1) embeddings += torch.repeat_interleave(self.pos_embed_a[:, :num_time_patches], self.num_freq_patches, dim=1) embeddings += self.type_embed_a return embeddings, attention_masks class TvltPixelPatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config): super().__init__() image_size, patch_size = config.image_size, config.image_patch_size num_channels, hidden_size = config.num_image_channels, config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches_per_image = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches_per_image = num_patches_per_image self.hidden_size = hidden_size self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: batch_size, num_frames, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})." ) pixel_values = pixel_values.reshape(batch_size * num_frames, num_channels, height, width) embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2) embeddings = embeddings.reshape(batch_size, num_frames * self.num_patches_per_image, self.hidden_size) return embeddings class TvltAudioPatchEmbeddings(nn.Module): """ This class turns `audio_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. 
""" def __init__(self, config): super().__init__() spectrogram_length, frequency_length, patch_size = ( config.spectrogram_length, config.frequency_length, config.audio_patch_size, ) num_channels, hidden_size = config.num_audio_channels, config.hidden_size spectrogram_size = (spectrogram_length, frequency_length) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (spectrogram_size[1] // patch_size[1]) * (spectrogram_size[0] // patch_size[0]) patch_shape = (spectrogram_size[0] // patch_size[0], spectrogram_size[1] // patch_size[1]) self.spectrogram_size = spectrogram_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.patch_shape = patch_shape self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, audio_values: torch.Tensor) -> torch.Tensor: batch_size, num_channels, height, width = audio_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) if height > self.spectrogram_size[0] or width != self.spectrogram_size[1]: raise ValueError( f"Input audio size ({height}*{width}) doesn't match model" f" ({self.spectrogram_size[0]}*{self.spectrogram_size[1]})." ) embeddings = self.projection(audio_values).flatten(2).transpose(1, 2) return embeddings # Copied from transformers.models.vilt.modeling_vilt.ViltSelfAttention with Vilt->Tvlt class TvltSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size,} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False): mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.vilt.modeling_vilt.ViltSelfOutput with Vilt->Tvlt class TvltSelfOutput(nn.Module): """ The residual connection is defined in TvltLayer instead of here (as is the case with other models), due to the layernorm applied before each block. """ def __init__(self, config: TvltConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.vilt.modeling_vilt.ViltAttention with Vilt->Tvlt class TvltAttention(nn.Module): def __init__(self, config): super().__init__() self.attention = TvltSelfAttention(config) self.output = TvltSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads ) # Prune linear layers self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False): self_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.vilt.modeling_vilt.ViltIntermediate with Vilt->Tvlt class TvltIntermediate(nn.Module): def __init__(self, config: TvltConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.vilt.modeling_vilt.ViltOutput with Vilt->Tvlt class TvltOutput(nn.Module): def __init__(self, config: TvltConfig) -> None: super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: 
hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states # Copied from transformers.models.vilt.modeling_vilt.ViltLayer with Vilt->Tvlt class TvltLayer(nn.Module): """This corresponds to the Block class in the timm implementation.""" def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = TvltAttention(config) self.intermediate = TvltIntermediate(config) self.output = TvltOutput(config) self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False): self_attention_outputs = self.attention( self.layernorm_before(hidden_states), # in ViLT, layernorm is applied before self-attention attention_mask, head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # first residual connection hidden_states = attention_output + hidden_states.to(attention_output.device) # in ViLT, layernorm is also applied after self-attention layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) # second residual connection is done here layer_output = self.output(layer_output, hidden_states) outputs = (layer_output,) + outputs return outputs # Copied from transformers.models.vilt.modeling_vilt.ViltEncoder with Vilt->Tvlt class TvltEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([TvltLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, layer_head_mask, output_attentions, ) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class TvltPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = TvltConfig base_model_prefix = "tvlt" main_input_name = "pixel_values" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) TVLT_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`TvltConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ TVLT_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`TvltProcessor`]. See [`TvltProcessor.__call__`] for details. audio_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Audio values. Audio values can be obtained using [`TvltProcessor`]. See [`TvltProcessor.__call__`] for details. pixel_mask (`torch.FloatTensor` of shape `(batch_size, num_pixel_patches)`): Pixel masks. Pixel masks can be obtained using [`TvltProcessor`]. See [`TvltProcessor.__call__`] for details. audio_mask (`torch.FloatTensor` of shape `(batch_size, num_audio_patches)`): Audio masks. Audio masks can be obtained using [`TvltProcessor`]. See [`TvltProcessor.__call__`] for details. pixel_values_mixed (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`): Pixel values that mix positive and negative samples in Tvlt vision-audio matching. Pixel values mixed can be obtained using [`TvltProcessor`]. See [`TvltProcessor.__call__`] for details. pixel_mask_mixed (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel masks of pixel_values_mixed. Pixel masks mixed can be obtained using [`TvltProcessor`]. See [`TvltProcessor.__call__`] for details. mask_pixel (`bool`, *optional*): Whether to mask pixel for MAE tasks. Only set to True in TvltForPreTraining. mask_audio (`bool`, *optional*): Whether to mask audio for MAE tasks. Only set to True in TvltForPreTraining. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The bare TVLT Model transformer outputting raw hidden-states without any specific head on top.", TVLT_START_DOCSTRING, ) class TvltModel(TvltPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.pixel_embeddings = TvltPixelEmbeddings(config) self.audio_embeddings = TvltAudioEmbeddings(config) self.encoder = TvltEncoder(config) self.cls_embedding = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if config.use_mean_pooling: self.layernorm = None else: self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.pixel_embeddings.patch_embeddings, self.audio_embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(TVLT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TvltModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.FloatTensor, audio_values: torch.FloatTensor, pixel_mask: Optional[torch.FloatTensor] = None, audio_mask: Optional[torch.FloatTensor] = None, mask_pixel: bool = False, mask_audio: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], TvltModelOutput]: r""" Returns: Examples: ```python >>> from transformers import TvltProcessor, TvltModel >>> import numpy as np >>> import torch >>> num_frames = 8 >>> images = list(np.random.randn(num_frames, 3, 224, 224)) >>> audio = list(np.random.randn(10000)) >>> processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base") >>> model = TvltModel.from_pretrained("ZinengTang/tvlt-base") >>> input_dict = processor(images, audio, sampling_rate=44100, return_tensors="pt") >>> outputs = model(**input_dict) >>> loss = outputs.loss ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict pixel_embedding_output, pixel_mask = self.pixel_embeddings(pixel_values, pixel_mask) audio_embedding_output, audio_mask = self.audio_embeddings(audio_values, audio_mask) # Mask pixel if mask_pixel is True pixel_label_masks = None pixel_ids_restore = None if mask_pixel: pixel_mask_noise, pixel_len_keep = generate_pixel_mask_noise( pixel_embedding_output, pixel_mask=pixel_mask, mask_ratio=self.config.pixel_mask_ratio ) pixel_embedding_output, pixel_mask, pixel_label_masks, pixel_ids_restore = random_masking( pixel_embedding_output, pixel_mask_noise, pixel_len_keep, attention_masks=pixel_mask, ) # Mask audio if mask_audio is True audio_label_masks = None audio_ids_restore = None if mask_audio: num_freq_patches = self.config.frequency_length // self.config.audio_patch_size[1] audio_mask_noise, audio_len_keep = generate_audio_mask_noise( audio_embedding_output, audio_mask=audio_mask, mask_ratio=self.config.audio_mask_ratio, mask_type=self.config.audio_mask_type, freq_len=num_freq_patches, ) audio_embedding_output, audio_mask, audio_label_masks, audio_ids_restore = 
random_masking( audio_embedding_output, audio_mask_noise, audio_len_keep, attention_masks=audio_mask, ) # Prepare for encoder inputs and attention masks batch_size = pixel_values.size(0) embedding_output = torch.cat( [self.cls_embedding.repeat(batch_size, 1, 1), pixel_embedding_output, audio_embedding_output], 1 ) masked_pixel_len = pixel_embedding_output.size(1) attention_mask = None if pixel_mask is not None and audio_mask is not None: attention_mask = torch.cat([pixel_mask[:, :1], pixel_mask, audio_mask], 1) input_shape = embedding_output.size() extended_attention_mask = None if attention_mask is not None: extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if self.layernorm is not None: sequence_output = self.layernorm(sequence_output) pixel_sequence_output = sequence_output[:, 1 : 1 + masked_pixel_len] audio_sequence_output = sequence_output[:, 1 + masked_pixel_len :] if not return_dict: return ( sequence_output, pixel_sequence_output, audio_sequence_output, pixel_label_masks, audio_label_masks, pixel_ids_restore, audio_ids_restore, ) + encoder_outputs[1:] return TvltModelOutput( last_hidden_state=sequence_output, last_pixel_hidden_state=pixel_sequence_output, last_audio_hidden_state=audio_sequence_output, pixel_label_masks=pixel_label_masks, audio_label_masks=audio_label_masks, pixel_ids_restore=pixel_ids_restore, audio_ids_restore=audio_ids_restore, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class TvltDecoder(nn.Module): def __init__(self, config): super().__init__() decoder_config = deepcopy(config) decoder_config.hidden_size = config.decoder_hidden_size decoder_config.num_hidden_layers = config.decoder_num_hidden_layers decoder_config.num_attention_heads = config.decoder_num_attention_heads decoder_config.intermediate_size = config.decoder_intermediate_size self.decoder_layers = nn.ModuleList( [TvltLayer(decoder_config) for _ in range(config.decoder_num_hidden_layers)] ) self.layernorm = nn.LayerNorm(config.decoder_hidden_size, eps=config.layer_norm_eps) self.gradient_checkpointing = False self.config = config def forward( self, hidden_states, output_attentions=False, output_hidden_states=False, return_dict=True, ): # apply Transformer layers (blocks) all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.decoder_layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, None, output_attentions, ) else: layer_outputs = layer_module(hidden_states, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # predictor projection logits = self.layernorm(hidden_states) if not return_dict: return tuple(v for v in [logits, all_hidden_states, all_self_attentions] if v is not None) return TvltDecoderOutput(logits=logits, hidden_states=all_hidden_states, attentions=all_self_attentions) @add_start_docstrings( "The TVLT Model transformer with the 
decoder on top for self-supervised pre-training.", TVLT_START_DOCSTRING, ) class TvltForPreTraining(TvltPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.task_matching = config.task_matching self.task_mae = config.task_mae if not (self.task_matching or self.task_mae): raise ValueError("Must set at least one of matching task and MAE task to true") self.tvlt = TvltModel(config) if self.task_matching: self.matching_head = TvltMatchingHead(config) if self.task_mae: self.encoder_to_decoder = nn.Linear(config.hidden_size, config.decoder_hidden_size, bias=True) self.pixel_mask_token = nn.Parameter(torch.zeros(1, 1, config.decoder_hidden_size)) self.audio_mask_token = nn.Parameter(torch.zeros(1, 1, config.decoder_hidden_size)) self.decoder = TvltDecoder(config) decoder_hidden_size = config.decoder_hidden_size num_frames = config.num_frames num_patches_per_image = self.tvlt.pixel_embeddings.num_patches_per_image self.decoder_pixel_pos_embed = nn.Parameter(torch.zeros(1, num_patches_per_image, decoder_hidden_size)) self.decoder_temporal_embed = nn.Parameter(torch.zeros(1, config.num_frames, decoder_hidden_size)) self.decoder_pixel_type_embed = nn.Parameter(torch.zeros(1, 1, decoder_hidden_size)) num_audio_patches = self.tvlt.audio_embeddings.num_patches num_freq_patches = config.frequency_length // config.audio_patch_size[1] self.decoder_audio_pos_embed = nn.Parameter( torch.zeros(1, num_audio_patches // num_freq_patches, decoder_hidden_size) ) self.decoder_freq_embed = nn.Parameter(torch.zeros(1, num_freq_patches, decoder_hidden_size)) self.decoder_audio_type_embed = nn.Parameter(torch.zeros(1, 1, decoder_hidden_size)) pixel_mae_output_dim = self.config.image_patch_size[0] ** 2 * self.config.num_image_channels self.pixel_mae_head = TvltMAEHead(config, pixel_mae_output_dim) audio_mae_output_dim = ( self.config.audio_patch_size[0] * self.config.audio_patch_size[1] * self.config.num_audio_channels ) self.audio_mae_head = TvltMAEHead(config, audio_mae_output_dim) self.num_frames = num_frames self.num_patches_per_image = num_patches_per_image self.num_freq_patches = num_freq_patches self.image_patch_size = config.image_patch_size self.audio_patch_size = config.audio_patch_size # Initialize weights and apply final processing self.post_init() def patchify_pixel(self, pixel_values): """ pixel_values: [batch_size, num_frames, 3, height, width] """ batch_size, num_frames, num_channels, height, width = pixel_values.shape num_patches_height = pixel_values.shape[3] // self.image_patch_size[0] num_patches_width = pixel_values.shape[4] // self.image_patch_size[1] patchified_pixel_values = pixel_values.reshape( shape=( batch_size, num_frames, num_channels, num_patches_height, self.image_patch_size[0], num_patches_width, self.image_patch_size[1], ) ) patchified_pixel_values = torch.einsum("ntchpwq->nthwpqc", patchified_pixel_values) patchified_pixel_values = patchified_pixel_values.reshape( shape=( batch_size, num_patches_height * num_patches_width * num_frames, self.image_patch_size[0] * self.image_patch_size[1] * num_channels, ) ) return patchified_pixel_values def patchify_audio(self, audio_values): """ audio_values: [batch_size, 1, height, width] """ batch_size, num_channels, height, width = audio_values.shape num_patches_height = height // self.audio_patch_size[0] num_patches_width = width // self.audio_patch_size[1] patchified_audio_values = audio_values.reshape( shape=( batch_size, num_channels, num_patches_height, self.audio_patch_size[0], num_patches_width, 
self.audio_patch_size[1],
            )
        )
        patchified_audio_values = torch.einsum("nchpwq->nhwpqc", patchified_audio_values)
        patchified_audio_values = patchified_audio_values.reshape(
            shape=(
                batch_size,
                num_patches_height * num_patches_width,
                self.audio_patch_size[0] * self.audio_patch_size[1] * num_channels,
            )
        )
        return patchified_audio_values

    def pixel_mae_loss(self, pixel_values, pixel_predictions, mask):
        patchified_pixel_values = self.patchify_pixel(pixel_values)
        loss = (pixel_predictions - patchified_pixel_values) ** 2
        loss = loss.mean(dim=-1)  # [batch_size, pixel_patch_length], mean loss per patch
        loss = (loss * mask).sum() / mask.sum()  # mean loss on removed patches
        return loss

    def audio_mae_loss(self, audio_values, audio_predictions, mask):
        patchified_audio_values = self.patchify_audio(audio_values)
        loss = (audio_predictions - patchified_audio_values) ** 2
        loss = loss.mean(dim=-1)  # [batch_size, audio_patch_length], mean loss per patch
        loss = (loss * mask).sum() / mask.sum()  # mean loss on removed patches
        return loss

    def concatenate_mask(self, mask_token, sequence, ids_restore):
        batch_size, seq_length, dim = sequence.shape
        mask_tokens = mask_token.repeat(batch_size, ids_restore.shape[1] - seq_length, 1)
        padded_sequence = torch.cat([sequence, mask_tokens], dim=1)
        padded_sequence = torch.gather(
            padded_sequence, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, dim)
        )  # unshuffle
        return padded_sequence

    @add_start_docstrings_to_model_forward(TVLT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TvltForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        audio_values: torch.FloatTensor,
        pixel_mask: Optional[torch.FloatTensor] = None,
        audio_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        pixel_values_mixed: Optional[torch.FloatTensor] = None,
        pixel_mask_mixed: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], TvltForPreTrainingOutput]:
        r"""
        pixel_values_mixed (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
            Pixel values that mix positive and negative samples in Tvlt vision-audio matching. Pixel values mixed can
            be obtained using [`TvltProcessor`]. See [`TvltProcessor.__call__`] for details.
        pixel_mask_mixed (`torch.FloatTensor` of shape `(batch_size, num_pixel_patches)`):
            Pixel masks of pixel_values_mixed. Pixel masks mixed can be obtained using [`TvltProcessor`]. See
            [`TvltProcessor.__call__`] for details.
        labels (`torch.LongTensor` of shape `(batch_size, num_labels)`, *optional*):
            Labels for computing the vision audio matching loss. Indices should be in `[0, 1]`. num_labels has to be 1.

        Return:

        Examples:

        ```python
        >>> from transformers import TvltProcessor, TvltForPreTraining
        >>> import numpy as np
        >>> import torch

        >>> num_frames = 8
        >>> images = list(np.random.randn(num_frames, 3, 224, 224))
        >>> images_mixed = list(np.random.randn(num_frames, 3, 224, 224))
        >>> audio = list(np.random.randn(10000))
        >>> processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
        >>> model = TvltForPreTraining.from_pretrained("ZinengTang/tvlt-base")
        >>> input_dict = processor(
        ...     images, audio, images_mixed, sampling_rate=44100, mask_pixel=True, mask_audio=True, return_tensors="pt"
        ... )
        >>> outputs = model(**input_dict)
        >>> loss = outputs.loss
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        total_loss = 0.0
        # Initialize so that the non-return_dict branch below cannot hit undefined names when a task is disabled
        loss = None
        matching_logits = None
        if self.task_matching:
            if labels is None:
                raise ValueError("Matching task requires labels")
            if pixel_values_mixed is None:
                raise ValueError("Matching task requires pixel_values_mixed")

            outputs = self.tvlt(
                pixel_values_mixed,
                audio_values,
                pixel_mask=pixel_mask_mixed,
                audio_mask=audio_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )

            sequence_output = outputs[0]

            matching_logits = self.matching_head(sequence_output)

            loss_fct = BCEWithLogitsLoss()
            loss = loss_fct(matching_logits.view(-1), labels.view(-1))
            total_loss += loss

        pixel_logits = None
        audio_logits = None
        if self.task_mae and self.training:
            outputs = self.tvlt(
                pixel_values,
                audio_values,
                pixel_mask=pixel_mask,
                audio_mask=audio_mask,
                mask_pixel=True,
                mask_audio=True,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )

            pixel_sequence_output = outputs.last_pixel_hidden_state if return_dict else outputs[1]
            audio_sequence_output = outputs.last_audio_hidden_state if return_dict else outputs[2]
            pixel_label_masks = outputs.pixel_label_masks if return_dict else outputs[3]
            audio_label_masks = outputs.audio_label_masks if return_dict else outputs[4]
            pixel_ids_restore = outputs.pixel_ids_restore if return_dict else outputs[5]
            audio_ids_restore = outputs.audio_ids_restore if return_dict else outputs[6]

            pixel_decoder_input = self.encoder_to_decoder(
                pixel_sequence_output
            )  # [batch_size, num_masked_pixel_patches, decoder_hidden_size]
            audio_decoder_input = self.encoder_to_decoder(
                audio_sequence_output
            )  # [batch_size, num_masked_audio_patches, decoder_hidden_size]
            num_frames = pixel_values.size(1)

            pixel_decoder_input = self.concatenate_mask(self.pixel_mask_token, pixel_decoder_input, pixel_ids_restore)
            pixel_decoder_input = pixel_decoder_input + self.decoder_pixel_pos_embed.repeat(1, num_frames, 1)
            pixel_decoder_input = pixel_decoder_input + torch.repeat_interleave(
                self.decoder_temporal_embed[:, :num_frames], self.num_patches_per_image, dim=1
            )
            pixel_decoder_input = pixel_decoder_input + self.decoder_pixel_type_embed
            pixel_decoder_outputs = self.decoder(pixel_decoder_input)
            pixel_logits = self.pixel_mae_head(pixel_decoder_outputs.logits)

            audio_decoder_input = self.concatenate_mask(self.audio_mask_token, audio_decoder_input, audio_ids_restore)
            num_time_patches = audio_decoder_input.size(1) // self.num_freq_patches
            audio_decoder_input = audio_decoder_input + self.decoder_freq_embed.repeat(1, num_time_patches, 1)
            audio_decoder_input = audio_decoder_input + torch.repeat_interleave(
                self.decoder_audio_pos_embed[:, :num_time_patches], self.num_freq_patches, dim=1
            )
            audio_decoder_input = audio_decoder_input + self.decoder_audio_type_embed
            audio_decoder_outputs = self.decoder(audio_decoder_input)
            audio_logits = self.audio_mae_head(audio_decoder_outputs.logits)

            loss = self.pixel_mae_loss(pixel_values, pixel_logits, pixel_label_masks) + self.audio_mae_loss(
                audio_values, audio_logits, audio_label_masks
            )
            total_loss += loss

        if not return_dict:
            output = (matching_logits, pixel_logits, audio_logits) + outputs[7:]
            return ((total_loss,) + output) if loss is not None else output

        return TvltForPreTrainingOutput(
            loss=total_loss,
            matching_logits=matching_logits,
            pixel_logits=pixel_logits,
            audio_logits=audio_logits,
            hidden_states=outputs.hidden_states,
attentions=outputs.attentions, ) class TvltPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class TvltMatchingHead(nn.Module): def __init__(self, config): super().__init__() self.pooler = TvltPooler(config) self.fc = nn.Linear(config.hidden_size, 1) def forward(self, hidden_states): hidden_states = self.fc(self.pooler(hidden_states)) return hidden_states class TvltMAEHead(nn.Module): def __init__(self, config, output_dim=None): super().__init__() self.config = config self.decoder = nn.Linear(config.decoder_hidden_size, output_dim) def forward(self, hidden_states): hidden_states = self.decoder(hidden_states) return hidden_states @add_start_docstrings( """ Tvlt Model transformer with a classifier head on top (an MLP on top of the final hidden state of the [CLS] token) for audiovisual classification tasks, e.g. CMU-MOSEI Sentiment Analysis and Audio to Video Retrieval. """, TVLT_START_DOCSTRING, ) class TvltForAudioVisualClassification(TvltPreTrainedModel): def __init__(self, config): super().__init__(config) self.tvlt = TvltModel(config) # Classifier head self.classifier = nn.Sequential( nn.Linear(config.hidden_size, config.hidden_size * 2), nn.LayerNorm(config.hidden_size * 2, eps=config.layer_norm_eps), nn.GELU(), nn.Linear(config.hidden_size * 2, config.num_labels), ) self.config = config # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(TVLT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.FloatTensor, audio_values: torch.FloatTensor, pixel_mask: Optional[torch.FloatTensor] = None, audio_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, ) -> Union[Tuple[torch.FloatTensor], SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, num_labels)`, *optional*): Labels for computing the audiovisual loss. Indices should be in `[0, ..., num_classes-1]` where num_classes refers to the number of classes in audiovisual tasks. 
Return: Examples: ```python >>> from transformers import TvltProcessor, TvltForAudioVisualClassification >>> import numpy as np >>> import torch >>> num_frames = 8 >>> images = list(np.random.randn(num_frames, 3, 224, 224)) >>> audio = list(np.random.randn(10000)) >>> processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base") >>> model = TvltForAudioVisualClassification.from_pretrained("ZinengTang/tvlt-base") >>> input_dict = processor(images, audio, sampling_rate=44100, return_tensors="pt") >>> outputs = model(**input_dict) >>> loss = outputs.loss ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.tvlt( pixel_values, audio_values, pixel_mask=pixel_mask, audio_mask=audio_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0][:, 0] logits = self.classifier(sequence_output) # rank value loss = None if labels is not None: if self.config.loss_type == "regression": loss_fct = MSELoss() loss = loss_fct(logits, labels) elif self.config.loss_type == "classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[4:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
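

# A minimal, self-contained sanity check for the masking utilities defined above (not part of the
# original module): it draws per-sample noise with `generate_pixel_mask_noise`, applies `random_masking`,
# and verifies that `ids_restore` really inverts the shuffle. The tensor shapes and the 0.75 mask ratio
# are illustrative assumptions, not model defaults.
if __name__ == "__main__":
    sequence = torch.randn(2, 16, 4)  # [batch_size, seq_len, hidden_dim]
    noise, len_keep = generate_pixel_mask_noise(sequence, mask_ratio=0.75)
    masked, _, label_masks, ids_restore = random_masking(sequence, noise, len_keep)

    # A quarter of the patches are kept; exactly seq_len - len_keep positions per sample are marked removed.
    assert masked.shape == (2, len_keep, 4)
    assert int(label_masks.sum().item()) == 2 * (16 - len_keep)
    # `ids_restore` is the inverse permutation of the shuffle, so gathering the shuffle with it
    # restores the identity order.
    unshuffled = torch.gather(torch.argsort(noise, dim=1), dim=1, index=ids_restore)
    assert torch.equal(unshuffled, torch.arange(16).expand(2, 16))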
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/tvlt/image_processing_tvlt.py
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for TVLT."""

from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    get_resize_output_image_size,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    infer_channel_dimension_format,
    is_scaled_image,
    is_valid_image,
    to_numpy_array,
    valid_images,
    validate_kwargs,
    validate_preprocess_arguments,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        videos_dim = np.array(videos[0]).ndim
        if videos_dim == 3:
            return [videos]
        elif videos_dim == 4:
            return videos

    elif is_valid_image(videos):
        videos_dim = np.array(videos).ndim
        if videos_dim == 3:
            return [[videos]]
        elif videos_dim == 4:
            return [videos]
        elif videos_dim == 5:
            return videos

    raise ValueError(f"Could not make batched video from {videos}")


class TvltImageProcessor(BaseImageProcessor):
    r"""
    Constructs a TVLT image processor.

    This processor can be used to prepare either videos or images for the model by converting images to 1-frame
    videos.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
            `do_resize` parameter in the `preprocess` method.
        size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
            Size of the output image after resizing. The shortest edge of the image will be resized to
            `size["shortest_edge"]` while maintaining the aspect ratio of the original image. Can be overridden by
            `size` in the `preprocess` method.
        patch_size (`List[int]`, *optional*, defaults to `[16, 16]`):
            The patch size of image patch embedding.
        num_frames (`int`, *optional*, defaults to 8):
            The maximum number of video frames.
        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
            Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
            `preprocess` method.
        do_center_crop (`bool`, *optional*, defaults to `True`):
            Whether to center crop the image to the specified `crop_size`. Can be overridden by the `do_center_crop`
            parameter in the `preprocess` method.
        crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
            Size of the image after applying the center crop. Can be overridden by the `crop_size` parameter in the
            `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
            `do_rescale` parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to 1/255):
            Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor`
            parameter in the `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess`
            method.
    """

    model_input_names = [
        "pixel_values",
        "pixel_mask",
        "pixel_values_mixed",
        "pixel_mask_mixed",
    ]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        patch_size: List[int] = [16, 16],
        num_frames: int = 8,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = IMAGENET_STANDARD_MEAN,
        image_std: Optional[Union[float, List[float]]] = IMAGENET_STANDARD_STD,
        init_mask_generator=False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self._valid_processor_keys = [
            "videos",
            "do_resize",
            "size",
            "patch_size",
            "num_frames",
            "resample",
            "do_center_crop",
            "crop_size",
            "do_rescale",
            "rescale_factor",
            "do_normalize",
            "image_mean",
            "image_std",
            "is_mixed",
            "return_tensors",
            "data_format",
            "input_data_format",
        ]

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Size of the output image. If `size` is of the form `{"height": h, "width": w}`, the output image will
                have the size `(h, w)`. If `size` is of the form `{"shortest_edge": s}`, the output image will have
                its shortest edge of length `s` while keeping the aspect ratio of the original image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the input image.
If not provided, it will be inferred.
        """
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(
                image, size["shortest_edge"], default_to_square=False, input_data_format=input_data_format
            )
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(
            image,
            size=output_size,
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Preprocesses a single image."""
        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_center_crop=do_center_crop,
            crop_size=crop_size,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if is_scaled_image(image) and do_rescale:
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )
        if input_data_format is None:
            input_data_format = infer_channel_dimension_format(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size, input_data_format=input_data_format)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
        image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        patch_size: List[int] = None,
        num_frames: int = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        is_mixed: bool = False,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> BatchFeature:
        """
        Preprocess videos or images, or a batch of videos or images.

        Args:
            videos (`ImageInput`):
                Images or videos to preprocess. Expects a single or batch of frames with pixel values ranging from 0
                to 255. If passing in frames with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after applying resize.
patch_size (`List[int]`, *optional*, defaults to `self.patch_size`):
                The patch size of image patch embedding.
            num_frames (`int`, *optional*, defaults to `self.num_frames`):
                The maximum number of video frames.
            resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
                Whether to center crop the image.
            crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
                Size of the image after applying the center crop.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values between [0 - 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation.
            is_mixed (`bool`, *optional*):
                If the input video has negative samples.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                    - Unset: Return a list of `np.ndarray`.
                    - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                    - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                    - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                    - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                    - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                    - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                    - Unset: Use the inferred channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                    - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                    - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                    - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **pixel_values** -- Pixel values to be fed to a model, of shape (batch_size, num_channels, height,
              width).

            - **pixel_mask** -- Pixel masks to be fed to a model, of shape (batch_size, num_pixel_patches).

            - **pixel_values_mixed** -- Pixel values with both positive and negative samples to be fed to a model, of
              shape (batch_size, num_channels, height, width).

            - **pixel_mask_mixed** -- Pixel masks with both positive and negative samples to be fed to a model, of
              shape (batch_size, num_pixel_patches).
""" do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name="crop_size") patch_size = patch_size if patch_size is not None else self.patch_size num_frames = num_frames if patch_size is not None else self.num_frames validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) if not valid_images(videos): raise ValueError( "Invalid image or video type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) videos = make_batched(videos) # Check number of frames is fewer than maximum frames for video in videos: if len(video) > self.num_frames: raise ValueError( f"number of frames must not be greater than the maximum frames of the model {self.num_frames}." ) max_num_frames = max([len(video) for video in videos]) num_patches_per_image = (size["shortest_edge"] // patch_size[0]) ** 2 video_masks = np.array( [ len(video) * num_patches_per_image * [1] + (max_num_frames - len(video)) * num_patches_per_image * [0] for video in videos ] ) videos = [ [ self._preprocess_image( image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format, ) for img in video ] for video in videos ] # If videos contain both positive/negative, use mixed key for video-audio matching task if is_mixed: data = {"pixel_values_mixed": videos, "pixel_mask_mixed": video_masks} else: data = {"pixel_values": videos, "pixel_mask": video_masks} return BatchFeature(data=data, tensor_type=return_tensors)
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/patchtst/modeling_patchtst.py
# coding=utf-8 # Copyright 2023 IBM & Hugging Face. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch PatchTST model.""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from torch import nn from ...activations import ACT2CLS from ...modeling_outputs import BaseModelOutput from ...modeling_utils import PreTrainedModel from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput from ...utils import ModelOutput, add_start_docstrings, logging from .configuration_patchtst import PatchTSTConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "PatchTSTConfig" from ..deprecated._archive_maps import PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->PatchTST class PatchTSTAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[PatchTSTConfig] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value class PatchTSTBatchNorm(nn.Module): """ Compute batch normalization over the sequence length (time) dimension. 
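
    A minimal illustrative sketch of the transpose -> `nn.BatchNorm1d` -> transpose pattern this module applies
    (shapes are made up for the example and are not tied to any checkpoint):

    ```python
    >>> import torch
    >>> from torch import nn

    >>> d_model = 16
    >>> batchnorm = nn.BatchNorm1d(d_model)
    >>> inputs = torch.randn(4, 32, d_model)  # (batch_size, sequence_length, d_model)
    >>> # BatchNorm1d expects (batch_size, num_features, length), so statistics are taken per d_model feature
    >>> output = batchnorm(inputs.transpose(1, 2)).transpose(1, 2)
    >>> output.shape
    torch.Size([4, 32, 16])
    ```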
""" def __init__(self, config: PatchTSTConfig): super().__init__() self.batchnorm = nn.BatchNorm1d(config.d_model, eps=config.norm_eps) def forward(self, inputs: torch.Tensor): """ Parameters: inputs (`torch.Tensor` of shape `(batch_size, sequence_length, d_model)`): input for Batch norm calculation Returns: `torch.Tensor` of shape `(batch_size, sequence_length, d_model)` """ output = inputs.transpose(1, 2) # output: (batch_size, d_model, sequence_length) output = self.batchnorm(output) return output.transpose(1, 2) def random_masking( inputs: torch.Tensor, mask_ratio: float, unmasked_channel_indices: list = None, channel_consistent_masking: bool = False, mask_value: int = 0, ): """random_masking: Mask the input considering the control variables. Args: inputs (`torch.Tensor` of shape `(batch_size, num_channels, sequence_length, num_features)`): The input tensor to mask. mask_ratio (`float`): Masking ratio applied to mask the input data during random pretraining. It is the number between 0 and 1. unmasked_channel_indices (list, *optional*): Indices of channels that will not be masked. channel_consistent_masking (bool, *optional*, defaults to `False`): When true, masking will be same across all channels of a timeseries. Otherwise, masking positions will vary across channels. mask_value (int, *optional*, defaults to 0): Define the value of masked patches for pretraining. Returns: `tuple(torch.Tensor)`: inputs_mask, masked input, same shape as input Tensor and mask tensor of shape [bs x c x n] """ if mask_ratio < 0 or mask_ratio >= 1: raise ValueError(f"Mask ratio {mask_ratio} has to be between 0 and 1.") batch_size, num_channels, sequence_length, num_features = inputs.shape device = inputs.device len_keep = int(sequence_length * (1 - mask_ratio)) if channel_consistent_masking: noise = torch.rand(batch_size, 1, sequence_length, device=device) # noise in [0, 1], bs x 1 x L noise = noise.repeat(1, num_channels, 1) # bs x num_channels x time else: # noise in [0, 1], bs x num_channels x L noise = torch.rand(batch_size, num_channels, sequence_length, device=device) # mask: [bs x num_channels x num_patch] mask = torch.ones(batch_size, num_channels, sequence_length, device=device) mask[:, :, :len_keep] = 0 # sort noise for each sample ids_shuffle = torch.argsort(noise, dim=-1) # ascend: small is keep, large is remove ids_restore = torch.argsort(ids_shuffle, dim=-1) # ids_restore: [bs x num_channels x L] mask = torch.gather(mask, dim=-1, index=ids_restore) mask = mask.unsqueeze(-1).repeat(1, 1, 1, num_features) # mask: [bs x num_channels x num_patches x patch_length] if unmasked_channel_indices is not None: mask[:, unmasked_channel_indices, :, :] = 0 inputs_mask = inputs.masked_fill(mask.bool(), mask_value) return inputs_mask, mask[..., 0] def forecast_masking( inputs: torch.Tensor, num_forecast_mask_patches: Union[list, int], unmasked_channel_indices: list = None, mask_value: int = 0, ): """Forecast masking that masks the last K patches where K is from the num_forecast_mask_patches. If num_forecast_mask_patches is a list, samples in the batch will be randomly masked by numbers defined in the list. Parameters: inputs (`torch.Tensor`): Input of shape `(bs, num_channels, num_patch, patch_length)` num_forecast_mask_patches (`list`): Number of patches to be masked at the end of each batch sample. e.g. 4 or [3, 5]. unmasked_channel_indices (`list`, *optional*): Indices of channels that are not masked. 
mask_value (`int`, *optional*, defaults to 0): Values in the masked patches will be filled by `mask_value`. Returns: `tuple(torch.Tensor)`: inputs_mask, masked input, same shape as inputs Tensor and Mask tensor of shape `(bs, num_channels , num_patch)` or `(bs, tsg1, tsg2, num_channels, num_patch)` """ if isinstance(num_forecast_mask_patches, int): num_forecast_mask_patches = [num_forecast_mask_patches] forecast_mask_ratios = [1 for _ in num_forecast_mask_patches] batch_size, num_channels, sequence_length, num_features = inputs.shape mask = torch.zeros(batch_size, num_channels, sequence_length, device=inputs.device) t_list = [] total_length = 0 total_ratio = sum(forecast_mask_ratios) for patch_length, ratio in zip(num_forecast_mask_patches, forecast_mask_ratios): if patch_length <= 0 or patch_length >= sequence_length: raise ValueError( f"num_forecast_mask_patches {patch_length} should be greater than 0 and less than total patches." ) temp_len = int(batch_size * ratio / total_ratio) t_list.append([patch_length, ratio, temp_len]) total_length += temp_len t_list = sorted(t_list, key=lambda x: x[2]) if total_length < batch_size: t_list[0][2] = t_list[0][2] + (batch_size - total_length) elif total_length > batch_size: t_list[-1][2] = t_list[-1][2] + (total_length - batch_size) batch1 = 0 for patch_len, _, temp_len in t_list: batch2 = batch1 + temp_len mask[batch1:batch2, :, -patch_len:] = 1 batch1 = batch2 perm = torch.randperm(mask.shape[0]) mask = mask[perm] mask = mask.unsqueeze(-1).repeat(1, 1, 1, num_features) # mask: [bs x num_channels x num_patch x patch_len] if unmasked_channel_indices is not None: mask[:, unmasked_channel_indices, :, :] = 0 inputs_mask = inputs.masked_fill(mask.bool(), mask_value) return inputs_mask, mask[..., 0] class PatchTSTPatchify(nn.Module): """ A class to patchify the time series sequence into different patches Returns: `torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)` """ def __init__(self, config: PatchTSTConfig): super().__init__() self.sequence_length = config.context_length self.patch_length = config.patch_length self.patch_stride = config.patch_stride if self.sequence_length <= self.patch_length: raise ValueError( f"Sequence length ({self.sequence_length}) has to be greater than the patch length ({self.patch_length})" ) # get the number of patches self.num_patches = (max(self.sequence_length, self.patch_length) - self.patch_length) // self.patch_stride + 1 new_sequence_length = self.patch_length + self.patch_stride * (self.num_patches - 1) self.sequence_start = self.sequence_length - new_sequence_length def forward(self, past_values: torch.Tensor): """ Parameters: past_values (`torch.Tensor` of shape `(batch_size, sequence_length, num_channels)`, *required*): Input for patchification Returns: `torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)` """ sequence_length = past_values.shape[-2] if sequence_length != self.sequence_length: raise ValueError( f"Input sequence length ({sequence_length}) doesn't match model configuration ({self.sequence_length})." 
) # output: [bs x new_sequence_length x num_channels] output = past_values[:, self.sequence_start :, :] # output: [bs x num_patches x num_input_channels x patch_length] output = output.unfold(dimension=-2, size=self.patch_length, step=self.patch_stride) # output: [bs x num_input_channels x num_patches x patch_length] output = output.transpose(-2, -3).contiguous() return output class PatchTSTMasking(nn.Module): """ Class to perform random or forecast masking. Parameters: config (`PatchTSTConfig`): model config Returns: x_mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`) Masked patched input mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`) Bool tensor indicating True on masked points """ def __init__(self, config: PatchTSTConfig): super().__init__() self.random_mask_ratio = config.random_mask_ratio self.channel_consistent_masking = config.channel_consistent_masking self.mask_type = config.mask_type self.num_forecast_mask_patches = config.num_forecast_mask_patches self.unmasked_channel_indices = config.unmasked_channel_indices self.mask_value = config.mask_value if self.unmasked_channel_indices is not None: self.unmasked_channel_indices = sorted(self.unmasked_channel_indices) def forward(self, patch_input: torch.Tensor): """ Parameters: patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*): Patch input Return: masked_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`) Masked patched input mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`) Bool tensor indicating True on masked points """ if self.mask_type == "random": masked_input, mask = random_masking( inputs=patch_input, mask_ratio=self.random_mask_ratio, unmasked_channel_indices=self.unmasked_channel_indices, channel_consistent_masking=self.channel_consistent_masking, mask_value=self.mask_value, ) elif self.mask_type == "forecast": masked_input, mask = forecast_masking( inputs=patch_input, num_forecast_mask_patches=self.num_forecast_mask_patches, unmasked_channel_indices=self.unmasked_channel_indices, mask_value=self.mask_value, ) else: raise ValueError(f"Invalid mask type {self.mask_type}.") # mask: [bs x num_input_channels x num_patch] mask = mask.bool() return masked_input, mask class PatchTSTEncoderLayer(nn.Module): """ PatchTST encoder layer """ def __init__(self, config: PatchTSTConfig): super().__init__() self.channel_attention = config.channel_attention # Multi-Head attention self.self_attn = PatchTSTAttention( embed_dim=config.d_model, num_heads=config.num_attention_heads, dropout=config.attention_dropout, ) # Add & Norm of the sublayer 1 self.dropout_path1 = nn.Dropout(config.path_dropout) if config.path_dropout > 0 else nn.Identity() if config.norm_type == "batchnorm": self.norm_sublayer1 = PatchTSTBatchNorm(config) elif config.norm_type == "layernorm": self.norm_sublayer1 = nn.LayerNorm(config.d_model, eps=config.norm_eps) else: raise ValueError(f"{config.norm_type} is not a supported norm layer type.") # Add & Norm of the sublayer 2 if self.channel_attention: self.dropout_path2 = nn.Dropout(config.path_dropout) if config.path_dropout > 0 else nn.Identity() if config.norm_type == "batchnorm": self.norm_sublayer2 = PatchTSTBatchNorm(config) elif config.norm_type == "layernorm": self.norm_sublayer2 = nn.LayerNorm(config.d_model, eps=config.norm_eps) else: raise ValueError(f"{config.norm_type} is not a supported norm layer type.") # Position-wise Feed-Forward 
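        # The feed-forward block below expands d_model -> ffn_dim, applies the configured activation (and
        # dropout when config.ff_dropout > 0), then projects back ffn_dim -> d_model. It acts on each patch
        # position independently, i.e. it is position-wise.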
self.ff = nn.Sequential( nn.Linear(config.d_model, config.ffn_dim, bias=config.bias), ACT2CLS[config.activation_function](), nn.Dropout(config.ff_dropout) if config.ff_dropout > 0 else nn.Identity(), nn.Linear(config.ffn_dim, config.d_model, bias=config.bias), ) # Add & Norm of sublayer 3 self.dropout_path3 = nn.Dropout(config.path_dropout) if config.path_dropout > 0 else nn.Identity() if config.norm_type == "batchnorm": self.norm_sublayer3 = PatchTSTBatchNorm(config) elif config.norm_type == "layernorm": self.norm_sublayer3 = nn.LayerNorm(config.d_model, eps=config.norm_eps) else: raise ValueError(f"{config.norm_type} is not a supported norm layer type.") self.pre_norm = config.pre_norm def forward(self, hidden_state: torch.Tensor, output_attentions: Optional[bool] = None): """ Parameters: hidden_state (`torch.Tensor` of shape `(batch_size, num_channels, sequence_length, d_model)`, *required*): Past values of the time series output_attentions (`bool`, *optional*): Whether or not to return the output attention of all layers Return: `torch.Tensor` of shape `(batch_size, num_channels, sequence_length, d_model)` """ batch_size, num_input_channels, sequence_length, d_model = hidden_state.shape # First sublayer: attention across time # hidden_states: [(bs*num_channels) x sequence_length x d_model] hidden_state = hidden_state.view(batch_size * num_input_channels, sequence_length, d_model) if self.pre_norm: ## Norm and Multi-Head attention and Add residual connection attn_output, attn_weights, _ = self.self_attn( hidden_states=self.norm_sublayer1(hidden_state), output_attentions=output_attentions ) # Add: residual connection with residual dropout hidden_state = hidden_state + self.dropout_path1(attn_output) else: ## Multi-Head attention and Add residual connection and Norm - Standard Transformer from BERT attn_output, attn_weights, _ = self.self_attn( hidden_states=hidden_state, output_attentions=output_attentions ) # hidden_states: [(bs*num_channels) x sequence_length x d_model] hidden_state = self.norm_sublayer1(hidden_state + self.dropout_path1(attn_output)) # hidden_state: [bs x num_channels x sequence_length x d_model] hidden_state = hidden_state.reshape(batch_size, num_input_channels, sequence_length, d_model) # second sublayer: attention across variable at any given time if self.channel_attention: # hidden_state: [bs x sequence_length x num_channels x d_model] hidden_state = hidden_state.transpose(2, 1).contiguous() # hidden_state: [(bs*sequence_length) x num_channels x d_model] hidden_state = hidden_state.view(batch_size * sequence_length, num_input_channels, d_model) if self.pre_norm: ## Norm and Multi-Head attention and Add residual connection attn_output, channel_attn_weights, _ = self.self_attn( hidden_states=self.norm_sublayer2(hidden_state), output_attentions=output_attentions ) # Add: residual connection with residual dropout hidden_state = hidden_state + self.dropout_path2(attn_output) else: ## Multi-Head attention and Add residual connection and Norm attn_output, channel_attn_weights, _ = self.self_attn( hidden_states=hidden_state, output_attentions=output_attentions ) # hidden_states: [(bs*sequence_length) x num_channels x d_model] hidden_state = self.norm_sublayer2(hidden_state + self.dropout_path2(attn_output)) # Reshape hidden state # hidden_state: [bs x sequence_length x num_channels x d_model] hidden_state = hidden_state.reshape(batch_size, sequence_length, num_input_channels, d_model) # hidden_state: [bs x num_channels x sequence_length x d_model] hidden_state = 
hidden_state.transpose(1, 2).contiguous() # Third sublayer: mixing across hidden # hidden_state: [(batch_size*num_channels) x sequence_length x d_model] hidden_state = hidden_state.view(batch_size * num_input_channels, sequence_length, d_model) if self.pre_norm: ## Norm and Position-wise Feed-Forward and Add residual connection # Add: residual connection with residual dropout hidden_state = hidden_state + self.dropout_path3(self.ff(self.norm_sublayer3(hidden_state))) else: ## Position-wise Feed-Forward and Add residual connection and Norm # Add: residual connection with residual dropout hidden_state = self.norm_sublayer3(hidden_state + self.dropout_path3(self.ff(hidden_state))) # [bs x num_channels x sequence_length x d_model] hidden_state = hidden_state.reshape(batch_size, num_input_channels, sequence_length, d_model) outputs = (hidden_state,) if output_attentions: outputs += (attn_weights, channel_attn_weights) if self.channel_attention else (attn_weights,) return outputs class PatchTSTPreTrainedModel(PreTrainedModel): config_class = PatchTSTConfig base_model_prefix = "model" main_input_name = "past_values" supports_gradient_checkpointing = False def _init_weights(self, module): """ Initialize weights """ if isinstance(module, PatchTSTPositionalEncoding): # initialize cls_token if self.config.use_cls_token: nn.init.normal_(module.cls_token, std=0.02) # initialize positional encoding if self.config.positional_encoding_type == "random": nn.init.normal_(module.position_enc, mean=0.0, std=0.1) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, PatchTSTBatchNorm): module.batchnorm.bias.data.zero_() module.batchnorm.weight.data.fill_(1.0) elif isinstance(module, (nn.Linear, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=self.config.init_std) if module.bias is not None: module.bias.data.zero_() def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (PatchTSTEncoder)): module.gradient_checkpointing = value class PatchTSTEmbedding(nn.Module): def __init__(self, config: PatchTSTConfig): super().__init__() self.num_input_channels = config.num_input_channels self.share_embedding = config.share_embedding # Input encoding: projection of feature vectors onto a d-dim vector space if self.share_embedding: self.input_embedding = nn.Linear(config.patch_length, config.d_model) else: self.input_embedding = nn.ModuleList() for _ in range(config.num_input_channels): self.input_embedding.append(nn.Linear(config.patch_length, config.d_model)) def forward(self, patch_input: torch.Tensor): """ Parameters: patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*): Patch input for embedding return: `torch.Tensor` of shape `(batch_size, num_channels, num_patches, d_model)` """ # Input encoding num_input_channels = patch_input.shape[1] if num_input_channels != self.num_input_channels: raise ValueError( f"The defined number of input channels ({self.num_input_channels}) in the config " f"has to be the same as the number of channels in the batch input ({num_input_channels})" ) if self.share_embedding: embeddings = self.input_embedding(patch_input) # x: [bs x num_channels x num_patches x d_model] else: embeddings = [self.input_embedding[i](patch_input[:, i, :, :]) for i in range(num_input_channels)] embeddings = torch.stack(embeddings, dim=1) return embeddings class PatchTSTPositionalEncoding(nn.Module): """ Class for positional encoding """ def __init__(self, config: 
PatchTSTConfig, num_patches: int):
        super().__init__()
        self.use_cls_token = config.use_cls_token
        self.num_input_channels = config.num_input_channels
        if config.use_cls_token:
            # cls_token: [1 x num_input_channels x 1 x d_model]
            self.cls_token = nn.Parameter(torch.zeros(1, 1, 1, config.d_model))
            num_patches += 1
        # positional encoding: [num_patches x d_model]
        self.position_enc = self._init_pe(config, num_patches)
        # Positional dropout
        self.positional_dropout = (
            nn.Dropout(config.positional_dropout) if config.positional_dropout > 0 else nn.Identity()
        )

    @staticmethod
    def _init_pe(config: PatchTSTConfig, num_patches: int) -> nn.Parameter:
        # Positional encoding
        if config.positional_encoding_type == "random":
            position_enc = nn.Parameter(torch.randn(num_patches, config.d_model), requires_grad=True)
        elif config.positional_encoding_type == "sincos":
            position_enc = torch.zeros(num_patches, config.d_model)
            position = torch.arange(0, num_patches).unsqueeze(1)
            div_term = torch.exp(torch.arange(0, config.d_model, 2) * -(math.log(10000.0) / config.d_model))
            position_enc[:, 0::2] = torch.sin(position * div_term)
            position_enc[:, 1::2] = torch.cos(position * div_term)
            position_enc = position_enc - position_enc.mean()
            position_enc = position_enc / (position_enc.std() * 10)
            position_enc = nn.Parameter(position_enc, requires_grad=False)
        else:
            raise ValueError(
                f"{config.positional_encoding_type} is not a valid positional encoder. Available types are 'random' and 'sincos'."
            )
        return position_enc

    def forward(self, patch_input: torch.Tensor):
        if self.use_cls_token:
            # patch_input: [bs x num_channels x num_patches x d_model]
            patch_input = self.positional_dropout(patch_input + self.position_enc[1:, :])
            # append cls token where cls_token: [1 x num_channels x 1 x d_model]
            cls_token = self.cls_token + self.position_enc[:1, :]
            # get the same copy of cls_token for all the samples in batch: [bs x num_channels x 1 x d_model]
            cls_tokens = cls_token.expand(patch_input.shape[0], self.num_input_channels, -1, -1)
            # hidden_state: [bs x num_channels x (num_patches+1) x d_model]
            hidden_state = torch.cat((cls_tokens, patch_input), dim=2)
        else:
            # hidden_state: [bs x num_channels x num_patches x d_model]
            hidden_state = self.positional_dropout(patch_input + self.position_enc)
        return hidden_state


class PatchTSTEncoder(PatchTSTPreTrainedModel):
    """
    PatchTST Encoder
    """

    def __init__(self, config: PatchTSTConfig, num_patches: int):
        super().__init__(config)
        self.gradient_checkpointing = False

        # Input embedding: projection of feature vectors onto a d-dim vector space
        self.embedder = PatchTSTEmbedding(config)
        # Positional encoding
        self.positional_encoder = PatchTSTPositionalEncoding(config, num_patches)
        # Encoder
        self.layers = nn.ModuleList([PatchTSTEncoderLayer(config) for _ in range(config.num_hidden_layers)])

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        patch_input: torch.Tensor,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
    ) -> BaseModelOutput:
        """
        Parameters:
            patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*):
                Past values of the time series
            output_hidden_states (bool, optional): Indicates if hidden states should be returned.
            output_attentions (bool, optional): Indicates if attentions should be returned.
        Returns:
            `BaseModelOutput`
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        # Input embedding
        patch_input = self.embedder(patch_input)
        # Positional encoding
        hidden_state = self.positional_encoder(patch_input)

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        for encoder_layer in self.layers:
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_state,)

            layer_outputs = encoder_layer(hidden_state=hidden_state, output_attentions=output_attentions)
            # get hidden state. hidden_state shape is [bs x num_channels x num_patches x d_model]
            # or [bs x num_channels x (num_patches+1) x d_model] if use cls_token
            hidden_state = layer_outputs[0]
            # append attention matrix at each layer
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        # return the encoder output
        return BaseModelOutput(last_hidden_state=hidden_state, hidden_states=encoder_states, attentions=all_attentions)


PATCHTST_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`PatchTSTConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@dataclass
class PatchTSTModelOutput(ModelOutput):
    """
    Base class for model's outputs, with potential hidden states.

    Parameters:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, d_model)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, num_channels, num_patches, d_model)`.
            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
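        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.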
        mask (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches)`, *optional*):
            Boolean tensor indicating which patches are masked
        loc (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*):
            Mean of the input data `(batch_size, sequence_length, num_channels)` over the `sequence_length`
        scale (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*):
            Std of the input data `(batch_size, sequence_length, num_channels)` over the `sequence_length`
        patch_input (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, patch_length)`):
            Patched input to the Transformer
    """

    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    mask: torch.FloatTensor = None
    loc: torch.FloatTensor = None
    scale: torch.FloatTensor = None
    patch_input: torch.FloatTensor = None


@dataclass
class PatchTSTForPretrainingOutput(ModelOutput):
    """
    Output type of [`PatchTSTForPretraining`].

    Parameters:
        loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
            MSE loss, averaged over the masked patches.
        prediction_output (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, patch_length)`):
            Prediction outputs of the time series modeling heads.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    """

    loss: Optional[torch.FloatTensor] = None
    prediction_output: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class PatchTSTForRegressionOutput(ModelOutput):
    """
    Output type of [`PatchTSTForRegression`].

    Parameters:
        loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
            MSE loss.
        regression_outputs (`torch.FloatTensor` of shape `(batch_size, num_targets)`):
            Regression outputs of the time series modeling heads.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
""" loss: Optional[torch.FloatTensor] = None regression_outputs: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class PatchTSTForPredictionOutput(ModelOutput): """ Output type of [`PatchTSTForPrediction`]. Parameters: loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): MSE loss. prediction_outputs (`torch.FloatTensor` of shape `(batch_size, prediction_length, -1)`): Prediction outputs of the time series modeling heads. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. loc: (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*) Mean of the input data (batch_size, sequence_length, num_channels) over the sequence_length scale: (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*) Std of the input data (batch_size, sequence_length, num_channels) over the sequence_length """ loss: Optional[torch.FloatTensor] = None prediction_outputs: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None loc: torch.FloatTensor = None scale: torch.FloatTensor = None @dataclass class PatchTSTForClassificationOutput(ModelOutput): """ Output type of [`PatchTSTForClassification`]. Parameters: loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (`torch.FloatTensor` of shape `(batch_size, num_targets)`): Prediction scores of the PatchTST modeling head (scores before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[torch.FloatTensor] = None prediction_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class SamplePatchTSTOutput(ModelOutput): """ Base class for time series model's predictions outputs that contains the sampled values from the chosen distribution. Parameters: sequences `(batch_size, num_samples, prediction_length, num_targets)`): Sampled values from the chosen distribution. """ sequences: torch.FloatTensor = None # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.nll def nll(input: torch.distributions.Distribution, target: torch.Tensor) -> torch.Tensor: """ Computes the negative log likelihood loss from input distribution with respect to target. """ return -input.log_prob(target) # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.weighted_average def weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor] = None, dim=None) -> torch.Tensor: """ Computes the weighted average of a given tensor across a given `dim`, masking values associated with weight zero, meaning instead of `nan * 0 = nan` you will get `0 * 0 = 0`. Args: input_tensor (`torch.FloatTensor`): Input tensor, of which the average must be computed. weights (`torch.FloatTensor`, *optional*): Weights tensor, of the same shape as `input_tensor`. dim (`int`, *optional*): The dim along which to average `input_tensor`. Returns: `torch.FloatTensor`: The tensor with values averaged along the specified `dim`. """ if weights is not None: weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor)) sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), min=1.0) return (weighted_tensor.sum(dim=dim) if dim else weighted_tensor.sum()) / sum_weights else: return input_tensor.mean(dim=dim) # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesStdScaler with TimeSeriesTransformer->PatchTST,TimeSeries->PatchTST class PatchTSTStdScaler(nn.Module): """ Standardize features by calculating the mean and scaling along the first dimension, and then normalizes it by subtracting from the mean and dividing by the standard deviation. """ def __init__(self, config: PatchTSTConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 self.keepdim = config.keepdim if hasattr(config, "keepdim") else True self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-5 def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): input for Batch norm calculation observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Calculating the scale on the observed indicator. 
Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, num_input_channels)`) """ denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim) denominator = denominator.clamp_min(1.0) loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator scale = torch.sqrt(variance + self.minimum_scale) return (data - loc) / scale, loc, scale # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesMeanScaler with TimeSeriesTransformer->PatchTST,TimeSeries->PatchTST class PatchTSTMeanScaler(nn.Module): """ Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data accordingly. """ def __init__(self, config: PatchTSTConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 self.keepdim = config.keepdim if hasattr(config, "keepdim") else True self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 self.default_scale = config.default_scale if hasattr(config, "default_scale") else None def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): input for Batch norm calculation observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Calculating the scale on the observed indicator. Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, num_input_channels)`) """ ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True) num_observed = observed_indicator.sum(self.dim, keepdim=True) scale = ts_sum / torch.clamp(num_observed, min=1) # If `default_scale` is provided, we use it, otherwise we use the scale # of the batch. if self.default_scale is None: batch_sum = ts_sum.sum(dim=0) batch_observations = torch.clamp(num_observed.sum(0), min=1) default_scale = torch.squeeze(batch_sum / batch_observations) else: default_scale = self.default_scale * torch.ones_like(scale) # apply default scale where there are no observations scale = torch.where(num_observed > 0, scale, default_scale) # ensure the scale is at least `self.minimum_scale` scale = torch.clamp(scale, min=self.minimum_scale) scaled_data = data / scale if not self.keepdim: scale = scale.squeeze(dim=self.dim) return scaled_data, torch.zeros_like(scale), scale # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesNOPScaler with TimeSeriesTransformer->PatchTST,TimeSeries->PatchTST class PatchTSTNOPScaler(nn.Module): """ Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data. 
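    It still returns a zero `loc` and a unit `scale` of shape `(batch_size, 1, num_input_channels)`, so downstream
    code can treat all scaler variants uniformly.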
""" def __init__(self, config: PatchTSTConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 self.keepdim = config.keepdim if hasattr(config, "keepdim") else True def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor = None ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): input for Batch norm calculation Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, num_input_channels)`) """ scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) return data, loc, scale class PatchTSTScaler(nn.Module): def __init__(self, config: PatchTSTConfig): super().__init__() if config.scaling == "mean" or config.scaling is True: self.scaler = PatchTSTMeanScaler(config) elif config.scaling == "std": self.scaler = PatchTSTStdScaler(config) else: self.scaler = PatchTSTNOPScaler(config) def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): Input for scaler calculation observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Calculating the scale on the observed indicator. Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, um_input_channels)`) """ data, loc, scale = self.scaler(data, observed_indicator) return data, loc, scale @add_start_docstrings( "The bare PatchTST Model outputting raw hidden-states without any specific head.", PATCHTST_START_DOCSTRING, ) class PatchTSTModel(PatchTSTPreTrainedModel): def __init__(self, config: PatchTSTConfig): super().__init__(config) self.scaler = PatchTSTScaler(config) self.patchifier = PatchTSTPatchify(config) self.do_mask_input = config.do_mask_input # get num_patches information from PatchTSTPatchify num_patches = self.patchifier.num_patches if self.do_mask_input: self.masking = PatchTSTMasking(config) else: self.masking = nn.Identity() self.encoder = PatchTSTEncoder(config, num_patches=num_patches) # Initialize weights and apply final processing self.post_init() def forward( self, past_values: torch.Tensor, past_observed_mask: Optional[torch.Tensor] = None, future_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, PatchTSTModelOutput]: r""" Parameters: past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*): Input sequence to the model past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). 
            future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_input_channels)`, *optional*):
                Future target values associated with the `past_values`
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers
            output_attentions (`bool`, *optional*):
                Whether or not to return the output attention of all layers
            return_dict (`bool`, *optional*):
                Whether or not to return a `ModelOutput` instead of a plain tuple.

        Returns:
            `PatchTSTModelOutput` or tuple of `torch.Tensor` (if `return_dict`=False or `config.return_dict`=False)

        Examples:

        ```python
        >>> from huggingface_hub import hf_hub_download
        >>> import torch
        >>> from transformers import PatchTSTModel

        >>> file = hf_hub_download(
        ...     repo_id="hf-internal-testing/etth1-hourly-batch", filename="train-batch.pt", repo_type="dataset"
        ... )
        >>> batch = torch.load(file)

        >>> model = PatchTSTModel.from_pretrained("namctin/patchtst_etth1_pretrain")

        >>> # during training, one provides both past and future values
        >>> outputs = model(
        ...     past_values=batch["past_values"],
        ...     future_values=batch["future_values"],
        ... )

        >>> last_hidden_state = outputs.last_hidden_state
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        if past_observed_mask is None:
            past_observed_mask = torch.ones_like(past_values)

        # scaled_past_values: tensor [bs x sequence_length x num_input_channels]
        scaled_past_values, loc, scale = self.scaler(past_values, past_observed_mask)

        # patched_values: [bs x num_input_channels x num_patches x patch_length] for pretrain
        patched_values = self.patchifier(scaled_past_values)
        if self.do_mask_input:
            masked_values, mask = self.masking(patched_values)
        else:
            masked_values, mask = self.masking(patched_values), None

        encoder_output = self.encoder(
            patch_input=masked_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )

        if not return_dict:
            outputs = (encoder_output.last_hidden_state, encoder_output.hidden_states, encoder_output.attentions)
            outputs = outputs + (mask, loc, scale, patched_values)
            return tuple(v for v in outputs if v is not None)

        return PatchTSTModelOutput(
            last_hidden_state=encoder_output.last_hidden_state,
            hidden_states=encoder_output.hidden_states,
            attentions=encoder_output.attentions,
            mask=mask,
            loc=loc,
            scale=scale,
            patch_input=patched_values,
        )


class PatchTSTMaskPretrainHead(nn.Module):
    """
    Pretraining head for mask modelling
    """

    def __init__(self, config: PatchTSTConfig):
        super().__init__()
        self.dropout = nn.Dropout(config.dropout)
        self.linear = nn.Linear(config.d_model, config.patch_length)
        self.use_cls_token = config.use_cls_token

    def forward(self, embedding: torch.Tensor) -> torch.Tensor:
        """
        Parameters:
            embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or
                    `(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*):
                Embedding from the model
        Returns:
            `torch.Tensor` of shape `(bs, num_channels, num_patches, patch_length)` or
                    `(bs, num_channels, num_patches+1, patch_length)` if `cls_token` is set to True
        """
        embedding = self.linear(self.dropout(embedding))  # [bs x num_channels x num_patches x patch_length]
        if self.use_cls_token:
            embedding = embedding[:, :, 1:, :]  # remove the first cls token
        return embedding


@add_start_docstrings(
    "The PatchTST for pretrain
model.", PATCHTST_START_DOCSTRING, ) class PatchTSTForPretraining(PatchTSTPreTrainedModel): def __init__(self, config: PatchTSTConfig): super().__init__(config) config.do_mask_input = True self.model = PatchTSTModel(config=config) self.head = PatchTSTMaskPretrainHead(config) # Initialize weights and apply final processing self.post_init() def forward( self, past_values: torch.Tensor, past_observed_mask: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, PatchTSTForPretrainingOutput]: r""" Parameters: past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*): Input sequence to the model past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers output_attentions (`bool`, *optional*): Whether or not to return the output attention of all layers return_dict (`bool`, *optional*): Whether or not to return a `ModelOutput` instead of a plain tuple. Returns: `PatchTSTForPretrainingOutput` or tuple of `torch.Tensor` (if `return_dict`=False or `config.return_dict`=False) Examples: ```python >>> from huggingface_hub import hf_hub_download >>> import torch >>> from transformers import PatchTSTConfig, PatchTSTForPretraining >>> file = hf_hub_download( ... repo_id="hf-internal-testing/etth1-hourly-batch", filename="train-batch.pt", repo_type="dataset" ... ) >>> batch = torch.load(file) >>> # Config for random mask pretraining >>> config = PatchTSTConfig( ... num_input_channels=7, ... context_length=512, ... patch_length=12, ... stride=12, ... mask_type='random', ... random_mask_ratio=0.4, ... use_cls_token=True, ... ) >>> # Config for forecast mask pretraining >>> config = PatchTSTConfig( ... num_input_channels=7, ... context_length=512, ... patch_length=12, ... stride=12, ... mask_type='forecast', ... num_forecast_mask_patches=5, ... use_cls_token=True, ... 
)

        >>> model = PatchTSTForPretraining(config)

        >>> # during pretraining, one only provides past values; the model reconstructs the masked patches
        >>> outputs = model(past_values=batch["past_values"])
        >>> loss = outputs.loss
        >>> loss.backward()
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # model_output.last_hidden_state: [bs x num_channels x num_patches x d_model] or
        # [bs x num_channels x (num_patches+1) x d_model] if use cls_token
        model_output = self.model(
            past_values=past_values,
            past_observed_mask=past_observed_mask,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            return_dict=True,
        )

        # x_hat: [bs x num_channels x num_patches x patch_length] or
        # [bs x num_channels x (num_patches+1) x patch_length] if use cls_token
        x_hat = self.head(model_output.last_hidden_state)

        # calculate masked_loss
        loss = nn.MSELoss(reduction="none")
        loss_val = loss(x_hat, model_output.patch_input)
        masked_loss = (loss_val.mean(dim=-1) * model_output.mask).sum() / (model_output.mask.sum() + 1e-10)

        encoder_states = model_output.hidden_states
        if not return_dict:
            outputs = (x_hat,) + model_output[1:-4]
            outputs = (masked_loss,) + outputs if masked_loss is not None else outputs
            return outputs
        return PatchTSTForPretrainingOutput(
            loss=masked_loss, prediction_output=x_hat, hidden_states=encoder_states, attentions=model_output.attentions
        )


class PatchTSTClassificationHead(nn.Module):
    def __init__(self, config: PatchTSTConfig):
        super().__init__()
        self.use_cls_token = config.use_cls_token
        self.pooling_type = config.pooling_type
        self.flatten = nn.Flatten(start_dim=1)
        self.dropout = nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity()
        self.linear = nn.Linear(config.num_input_channels * config.d_model, config.num_targets)

    def forward(self, embedding: torch.Tensor):
        """
        Parameters:
            embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or
                    `(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*):
                Embedding from the model
        Returns:
            `torch.Tensor` of shape `(bs, num_targets)`
        """
        if self.use_cls_token:
            # use the first output token, pooled_embedding: bs x num_channels x d_model
            pooled_embedding = embedding[:, :, 0, :]
        elif self.pooling_type == "mean":
            # pooled_embedding: [bs x num_channels x d_model]
            pooled_embedding = embedding.mean(dim=2)
        elif self.pooling_type == "max":
            # pooled_embedding: [bs x num_channels x d_model]
            pooled_embedding = embedding.max(dim=2).values
        else:
            raise ValueError(f"pooling operator {self.pooling_type} is not implemented yet")
        # pooled_embedding: bs x num_channels * d_model
        pooled_embedding = self.flatten(pooled_embedding)
        # output: bs x n_classes
        output = self.linear(self.dropout(pooled_embedding))
        return output


@add_start_docstrings(
    "The PatchTST for classification model.",
    PATCHTST_START_DOCSTRING,
)
class PatchTSTForClassification(PatchTSTPreTrainedModel):
    def __init__(self, config: PatchTSTConfig):
        super().__init__(config)

        # Turn off masking
        if config.do_mask_input:
            logger.warning("Setting `do_mask_input` parameter to False.")
            config.do_mask_input = False

        self.model = PatchTSTModel(config)
        self.head = PatchTSTClassificationHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        past_values: torch.Tensor,
        target_values: Optional[torch.Tensor] = None,
        past_observed_mask: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple,
PatchTSTForClassificationOutput]:
        r"""
        Parameters:
            past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*):
                Input sequence to the model
            target_values (`torch.Tensor`, *optional*):
                Labels associated with the `past_values`
            past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
                Boolean mask to indicate which `past_values` were observed and which were missing. Mask values
                selected in `[0, 1]`:

                - 1 for values that are **observed**,
                - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers
            output_attentions (`bool`, *optional*):
                Whether or not to return the output attention of all layers
            return_dict (`bool`, *optional*):
                Whether or not to return a `ModelOutput` instead of a plain tuple.

        Returns:
            `PatchTSTForClassificationOutput` or tuple of `torch.Tensor` (if `return_dict`=False or
            `config.return_dict`=False)

        Examples:

        ```python
        >>> import torch
        >>> from transformers import PatchTSTConfig, PatchTSTForClassification

        >>> # classification task with two input channels and 3 classes
        >>> config = PatchTSTConfig(
        ...     num_input_channels=2,
        ...     num_targets=3,
        ...     context_length=512,
        ...     patch_length=12,
        ...     stride=12,
        ...     use_cls_token=True,
        ... )
        >>> model = PatchTSTForClassification(config=config)

        >>> # during inference, one only provides past values
        >>> past_values = torch.randn(20, 512, 2)
        >>> outputs = model(past_values=past_values)
        >>> labels = outputs.prediction_logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        model_output = self.model(
            past_values=past_values,
            past_observed_mask=past_observed_mask,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            return_dict=True,
        )
        y_hat = self.head(model_output.last_hidden_state)

        loss_val = None
        if target_values is not None:
            loss = nn.CrossEntropyLoss()
            loss_val = loss(y_hat, target_values)

        if not return_dict:
            outputs = (y_hat,) + model_output[1:-3]
            outputs = (loss_val,) + outputs if loss_val is not None else outputs
            return outputs
        return PatchTSTForClassificationOutput(
            loss=loss_val,
            prediction_logits=y_hat,
            hidden_states=model_output.hidden_states,
            attentions=model_output.attentions,
        )


class PatchTSTPredictionHead(nn.Module):
    def __init__(self, config: PatchTSTConfig, num_patches, distribution_output=None):
        super().__init__()

        self.share_projection = config.share_projection
        self.num_input_channels = config.num_input_channels
        self.use_cls_token = config.use_cls_token
        self.pooling_type = config.pooling_type
        if self.pooling_type or self.use_cls_token:
            head_dim = config.d_model
        else:
            head_dim = config.d_model * num_patches

        if not self.share_projection:
            # if each channel has its own head
            self.projections = nn.ModuleList()
            self.dropouts = nn.ModuleList()
            self.flattens = nn.ModuleList()
            for _ in range(self.num_input_channels):
                self.flattens.append(nn.Flatten(start_dim=2))
                if distribution_output is None:
                    # use linear head
                    self.projections.append(nn.Linear(head_dim, config.prediction_length))
                else:
                    # use distribution head
                    self.projections.append(distribution_output.get_parameter_projection(head_dim))
                self.dropouts.append(nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity())
        else:
            # all the channels share the same head
            self.flatten = nn.Flatten(start_dim=2)
            if distribution_output
is None: # use linear head self.projection = nn.Linear(head_dim, config.prediction_length) else: # use distribution head self.projection = distribution_output.get_parameter_projection(head_dim) self.dropout = nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity() def forward(self, embedding: torch.Tensor): """ Parameters: embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or `(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*): Embedding from the model Returns: `torch.Tensor` of shape `(bs, forecast_len, num_channels)` """ if self.use_cls_token: # pooled_embedding: [bs x num_channels x d_model] pooled_embedding = embedding[:, :, 0, :] else: if self.pooling_type == "mean": # pooled_embedding: [bs x num_channels x d_model] pooled_embedding = embedding.mean(dim=2) elif self.pooling_type == "max": # pooled_embedding: [bs x num_channels x d_model] pooled_embedding = embedding.max(dim=2).values else: # pooled_embedding: [bs x num_channels x num_patches x d_model] pooled_embedding = embedding if not self.share_projection: output = [] for i in range(self.num_input_channels): # pooled_embedding: [bs x (d_model * num_patches)] or [bs x d_model)] pooled_embedding = self.flattens[i](pooled_embedding[:, i, :]) pooled_embedding = self.dropouts[i](pooled_embedding) # pooled_embedding: [bs x forecast_len] # or tuple ([bs x forecast_len], [bs x forecast_len]) if using distribution head pooled_embedding = self.projections[i](pooled_embedding) output.append(pooled_embedding) # output: [bs x num_channels x forecast_len] output = torch.stack(output, dim=1) else: # pooled_embedding: [bs x num_channels x (d_model * num_patches)] or [bs x num_channels x d_model)] pooled_embedding = self.flatten(pooled_embedding) pooled_embedding = self.dropout(pooled_embedding) # output: [bs x num_channels x forecast_len] or # tuple ([bs x num_channels x forecast_len], [bs x num_channels x forecast_len]) if using distribution head output = self.projection(pooled_embedding) if isinstance(output, tuple): # output: ([bs x forecast_len x num_channels], [bs x forecast_len x num_channels]) output = tuple(z.transpose(2, 1) for z in output) else: output = output.transpose(2, 1) # [bs x forecast_len x num_channels] return output @add_start_docstrings( "The PatchTST for prediction model.", PATCHTST_START_DOCSTRING, ) class PatchTSTForPrediction(PatchTSTPreTrainedModel): def __init__(self, config: PatchTSTConfig): super().__init__(config) # Turn off masking if config.do_mask_input: logger.warning("Setting `do_mask_input` parameter to False.") config.do_mask_input = False self.model = PatchTSTModel(config) if config.loss == "mse": self.distribution_output = None else: if config.distribution_output == "student_t": self.distribution_output = StudentTOutput(dim=config.prediction_length) elif config.distribution_output == "normal": self.distribution_output = NormalOutput(dim=config.prediction_length) elif config.distribution_output == "negative_binomial": self.distribution_output = NegativeBinomialOutput(dim=config.prediction_length) else: raise ValueError(f"Unknown distribution output {config.distribution_output}") self.head = PatchTSTPredictionHead( config, self.model.patchifier.num_patches, distribution_output=self.distribution_output ) # Initialize weights and apply final processing self.post_init() def forward( self, past_values: torch.Tensor, past_observed_mask: Optional[torch.Tensor] = None, future_values: Optional[torch.Tensor] = None, 
output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, PatchTSTForPredictionOutput]: r""" Parameters: past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*): Input sequence to the model past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). future_values (`torch.Tensor` of shape `(bs, forecast_len, num_input_channels)`, *optional*): Future target values associated with the `past_values` output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers output_attentions (`bool`, *optional*): Whether or not to return the attentions of all layers return_dict (`bool`, *optional*): Whether or not to return a `ModelOutput` instead of a plain tuple. Returns: `PatchTSTForPredictionOutput` or tuple of `torch.Tensor` (if `return_dict`=False or `config.return_dict`=False) Examples: ```python >>> from huggingface_hub import hf_hub_download >>> import torch >>> from transformers import PatchTSTConfig, PatchTSTForPrediction >>> file = hf_hub_download( ... repo_id="hf-internal-testing/etth1-hourly-batch", filename="train-batch.pt", repo_type="dataset" ... ) >>> batch = torch.load(file) >>> # Prediction task with 7 input channels and a prediction length of 96 >>> model = PatchTSTForPrediction.from_pretrained("namctin/patchtst_etth1_forecast") >>> # during training, one provides both past and future values >>> outputs = model( ... past_values=batch["past_values"], ... future_values=batch["future_values"], ...
) >>> loss = outputs.loss >>> loss.backward() >>> # during inference, one only provides past values, the model outputs future values >>> outputs = model(past_values=batch["past_values"]) >>> prediction_outputs = outputs.prediction_outputs ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict # get model output model_output = self.model( past_values=past_values, past_observed_mask=past_observed_mask, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=True, ) # get output head y_hat = self.head(model_output.last_hidden_state) loss_val = None if self.distribution_output: y_hat_out = y_hat else: y_hat_out = y_hat * model_output.scale + model_output.loc if future_values is not None: if self.distribution_output: distribution = self.distribution_output.distribution( y_hat, loc=model_output.loc, scale=model_output.scale ) loss_val = nll(distribution, future_values) # take average of the loss loss_val = weighted_average(loss_val) else: loss = nn.MSELoss(reduction="mean") loss_val = loss(y_hat_out, future_values) loc = model_output.loc scale = model_output.scale if not return_dict: outputs = (y_hat_out,) + model_output[1:-1] outputs = (loss_val,) + outputs if loss_val is not None else outputs return outputs return PatchTSTForPredictionOutput( loss=loss_val, prediction_outputs=y_hat_out, hidden_states=model_output.hidden_states, attentions=model_output.attentions, loc=loc, scale=scale, ) def generate( self, past_values: torch.Tensor, past_observed_mask: Optional[torch.Tensor] = None, ) -> SamplePatchTSTOutput: """ Generate sequences of sample predictions from a model with a probability distribution head. Parameters: past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Past values of the time series that serves as context in order to predict the future. past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). Return: [`SamplePatchTSTOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of samples, prediction_length, 1)` or `(batch_size, number of samples, prediction_length, num_input_channels)` for multivariate predictions. 
""" # get number of samples num_parallel_samples = self.config.num_parallel_samples # get model output outputs = self( past_values=past_values, future_values=None, past_observed_mask=past_observed_mask, output_hidden_states=False, ) if self.distribution_output: # get distribution distribution = self.distribution_output.distribution( outputs.prediction_outputs, loc=outputs.loc, scale=outputs.scale ) # get samples: list of [bs x forecast_len x num_channels] samples = [distribution.sample() for _ in range(num_parallel_samples)] # samples: [bs x num_samples x forecast_len x num_channels] samples = torch.stack(samples, dim=1) else: samples = outputs.prediction_outputs.unsqueeze(1) return SamplePatchTSTOutput(sequences=samples) class PatchTSTRegressionHead(nn.Module): """ Regression head """ def __init__(self, config: PatchTSTConfig, distribution_output=None): super().__init__() self.y_range = config.output_range self.use_cls_token = config.use_cls_token self.pooling_type = config.pooling_type self.distribution_output = distribution_output head_dim = config.num_input_channels * config.d_model self.flatten = nn.Flatten(start_dim=1) self.dropout = nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity() if distribution_output is None: self.projection = nn.Linear(head_dim, config.num_targets) else: self.projection = distribution_output.get_parameter_projection(head_dim) def forward(self, embedding: torch.Tensor): """ Parameters: embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or `(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*): Embedding from the model Returns: `torch.Tensor` of shape `(bs, output_dim)` """ if self.use_cls_token: # use the first output token, pooled_embedding: [bs x num_channels x d_model] pooled_embedding = embedding[:, :, 0, :] elif self.pooling_type == "mean": # pooled_embedding: [bs x num_channels x d_model] pooled_embedding = embedding.mean(dim=2) elif self.pooling_type == "max": # pooled_embedding: [bs x num_channels x d_model] pooled_embedding = embedding.max(dim=2).values else: raise ValueError(f"pooling operator {self.pooling_type} is not implemented yet") # flatten the input # pooled_embedding: bs x (num_channels * d_model) pooled_embedding = self.dropout(self.flatten(pooled_embedding)) # projection # output: bs x output_dim or a tuple of this shape for distribution head output = self.projection(pooled_embedding) # apply sigmoid to bound the output if required if (self.distribution_output is None) & (self.y_range is not None): # linear head output = torch.sigmoid(output) * (self.y_range[1] - self.y_range[0]) + self.y_range[0] return output @add_start_docstrings( "The PatchTST for regression model.", PATCHTST_START_DOCSTRING, ) class PatchTSTForRegression(PatchTSTPreTrainedModel): def __init__(self, config: PatchTSTConfig): super().__init__(config) # Turn off masking if config.do_mask_input: logger.warning("Setting `do_mask_input` parameter to False.") config.do_mask_input = False self.model = PatchTSTModel(config) if config.loss == "mse": self.distribution_output = None else: if config.distribution_output == "student_t": self.distribution_output = StudentTOutput(dim=config.num_targets) elif config.distribution_output == "normal": self.distribution_output = NormalOutput(dim=config.num_targets) elif config.distribution_output == "negative_binomial": self.distribution_output = NegativeBinomialOutput(dim=config.num_targets) else: raise ValueError(f"Unknown distribution output 
{config.distribution_output}") self.head = PatchTSTRegressionHead(config, self.distribution_output) # Initialize weights and apply final processing self.post_init() def forward( self, past_values: torch.Tensor, target_values: torch.Tensor = None, past_observed_mask: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, PatchTSTForRegressionOutput]: r""" Parameters: past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*): Input sequence to the model target_values (`torch.Tensor` of shape `(bs, num_input_channels)`): Target values associates with the `past_values` past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers output_attentions (`bool`, *optional*): Whether or not to return the output attention of all layers return_dict (`bool`, *optional*): Whether or not to return a `ModelOutput` instead of a plain tuple. Returns: `PatchTSTForRegressionOutput` or tuple of `torch.Tensor` (if `return_dict`=False or `config.return_dict`=False) Examples: ```python >>> from transformers import PatchTSTConfig, PatchTSTForRegression >>> # Regression task with 6 input channels and regress 2 targets >>> model = PatchTSTForRegression.from_pretrained("namctin/patchtst_etth1_regression") >>> # during inference, one only provides past values, the model outputs future values >>> past_values = torch.randn(20, 512, 6) >>> outputs = model(past_values=past_values) >>> regression_outputs = outputs.regression_outputs ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict model_output = self.model( past_values=past_values, past_observed_mask=past_observed_mask, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=True, ) # get output head. y_hat is of shape [bs x num_targets] or tuple of this shape y_hat = self.head(model_output.last_hidden_state) loss = None if target_values is not None: if self.distribution_output: distribution = self.distribution_output.distribution(y_hat) # y_hat should be a 2-tuple, each with dimension [bs, num_targets] y_hat = tuple([item.view(-1, self.config.num_targets) for item in y_hat]) loss = nll(distribution, target_values) # take average of the loss loss = weighted_average(loss) else: loss = nn.MSELoss(reduction="mean") loss = loss(y_hat, target_values) if not return_dict: # hidden_states, attentions, mask outputs = (y_hat,) + model_output[1:-3] outputs = (loss,) + outputs if loss is not None else outputs return outputs return PatchTSTForRegressionOutput( loss=loss, regression_outputs=y_hat, hidden_states=model_output.hidden_states, attentions=model_output.attentions, ) def generate( self, past_values: torch.Tensor, past_observed_mask: Optional[torch.Tensor] = None, ) -> SamplePatchTSTOutput: """ Generate sequences of sample predictions from a model with a probability distribution head. Parameters: past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Past values of the time series that serves as context in order to predict the future. 
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). Return: [`SamplePatchTSTOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of samples, num_targets)`. """ # get number of samples num_parallel_samples = self.config.num_parallel_samples # get model output outputs = self( past_values=past_values, target_values=None, past_observed_mask=past_observed_mask, output_hidden_states=False, ) # get distribution distribution = self.distribution_output.distribution(outputs.regression_outputs) # get samples: list of [bs x num_targets] samples = [distribution.sample() for _ in range(num_parallel_samples)] # samples: [bs x num_samples x num_targets] samples = torch.stack(samples, dim=1).view(-1, num_parallel_samples, self.config.num_targets) return SamplePatchTSTOutput(sequences=samples)
0
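As a quick, hedged illustration of the probabilistic path through `PatchTSTForPrediction.generate` above: a minimal sketch with a randomly initialized model and synthetic inputs, so only the shapes are meaningful. All config fields used here come from `PatchTSTConfig`; the median/quantile post-processing is an illustrative choice, not part of the library API.

```python
import torch

from transformers import PatchTSTConfig, PatchTSTForPrediction

# loss="nll" activates the Student-T distribution head, so `generate`
# can draw `num_parallel_samples` Monte Carlo forecast paths.
config = PatchTSTConfig(
    num_input_channels=7,
    context_length=512,
    patch_length=12,
    patch_stride=12,
    prediction_length=96,
    loss="nll",
    distribution_output="student_t",
    num_parallel_samples=100,
)
model = PatchTSTForPrediction(config)
model.eval()

past_values = torch.randn(4, 512, 7)  # (batch, context_length, num_input_channels)
with torch.no_grad():
    samples = model.generate(past_values=past_values).sequences
# samples: (batch, num_parallel_samples, prediction_length, num_input_channels)
point_forecast = samples.median(dim=1).values  # robust point estimate
lower = samples.quantile(0.1, dim=1)           # 80% prediction interval
upper = samples.quantile(0.9, dim=1)
```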
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/patchtst/__init__.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = { "configuration_patchtst": [ "PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP", "PatchTSTConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_patchtst"] = [ "PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST", "PatchTSTModel", "PatchTSTPreTrainedModel", "PatchTSTForPrediction", "PatchTSTForPretraining", "PatchTSTForRegression", "PatchTSTForClassification", ] if TYPE_CHECKING: from .configuration_patchtst import PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP, PatchTSTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_patchtst import ( PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST, PatchTSTForClassification, PatchTSTForPrediction, PatchTSTForPretraining, PatchTSTForRegression, PatchTSTModel, PatchTSTPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0
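The `_LazyModule` registration in the `__init__` above means the torch-backed classes are imported only on first attribute access. A small sketch of that behavior (assuming a recent `transformers` install; the printed names are what the pattern implies, not a tested transcript):

```python
import sys

import transformers.models.patchtst  # executing the __init__ registers the lazy module

# The entry in sys.modules is the _LazyModule stand-in, not a plain module ...
print(type(sys.modules["transformers.models.patchtst"]).__name__)  # _LazyModule

# ... and modeling_patchtst (and hence the torch-heavy code) is only
# resolved when an exported symbol is first accessed:
cls = sys.modules["transformers.models.patchtst"].PatchTSTForPrediction
print(cls.__module__)  # transformers.models.patchtst.modeling_patchtst
```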
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/patchtst/configuration_patchtst.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PatchTST model configuration""" from typing import List, Optional, Union from transformers.configuration_utils import PretrainedConfig from transformers.utils import logging logger = logging.get_logger(__name__) from ..deprecated._archive_maps import PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 class PatchTSTConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`PatchTSTModel`]. It is used to instantiate a PatchTST model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [ibm/patchtst](https://huggingface.co/ibm/patchtst) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_input_channels (`int`, *optional*, defaults to 1): The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of multivariate targets. context_length (`int`, *optional*, defaults to 32): The context length of the input sequence. distribution_output (`str`, *optional*, defaults to `"student_t"`): The distribution emission head for the model when loss is "nll". Could be either "student_t", "normal" or "negative_binomial". loss (`str`, *optional*, defaults to `"mse"`): The loss function for the model corresponding to the `distribution_output` head. For parametric distributions it is the negative log likelihood ("nll") and for point estimates it is the mean squared error "mse". patch_length (`int`, *optional*, defaults to 1): Define the patch length of the patchification process. patch_stride (`int`, *optional*, defaults to 1): Define the stride of the patchification process. num_hidden_layers (`int`, *optional*, defaults to 3): Number of hidden layers. d_model (`int`, *optional*, defaults to 128): Dimensionality of the transformer layers. num_attention_heads (`int`, *optional*, defaults to 4): Number of attention heads for each attention layer in the Transformer encoder. share_embedding (`bool`, *optional*, defaults to `True`): Sharing the input embedding across all channels. channel_attention (`bool`, *optional*, defaults to `False`): Activate channel attention block in the Transformer to allow channels to attend to each other. ffn_dim (`int`, *optional*, defaults to 512): Dimension of the "intermediate" (often named feed-forward) layer in the Transformer encoder. norm_type (`str`, *optional*, defaults to `"batchnorm"`): Normalization at each Transformer layer. Can be `"batchnorm"` or `"layernorm"`. norm_eps (`float`, *optional*, defaults to 1e-05): A value added to the denominator for numerical stability of normalization. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for the attention probabilities. dropout (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the Transformer.
positional_dropout (`float`, *optional*, defaults to 0.0): The dropout probability in the positional embedding layer. path_dropout (`float`, *optional*, defaults to 0.0): The dropout path in the residual block. ff_dropout (`float`, *optional*, defaults to 0.0): The dropout probability used between the two layers of the feed-forward networks. bias (`bool`, *optional*, defaults to `True`): Whether to add bias in the feed-forward networks. activation_function (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function (string) in the Transformer.`"gelu"` and `"relu"` are supported. pre_norm (`bool`, *optional*, defaults to `True`): Normalization is applied before self-attention if pre_norm is set to `True`. Otherwise, normalization is applied after residual block. positional_encoding_type (`str`, *optional*, defaults to `"sincos"`): Positional encodings. Options `"random"` and `"sincos"` are supported. use_cls_token (`bool`, *optional*, defaults to `False`): Whether cls token is used. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated normal weight initialization distribution. share_projection (`bool`, *optional*, defaults to `True`): Sharing the projection layer across different channels in the forecast head. scaling (`Union`, *optional*, defaults to `"std"`): Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the scaler is set to "mean". do_mask_input (`bool`, *optional*): Apply masking during the pretraining. mask_type (`str`, *optional*, defaults to `"random"`): Masking type. Only `"random"` and `"forecast"` are currently supported. random_mask_ratio (`float`, *optional*, defaults to 0.5): Masking ratio applied to mask the input data during random pretraining. num_forecast_mask_patches (`int` or `list`, *optional*, defaults to `[2]`): Number of patches to be masked at the end of each batch sample. If it is an integer, all the samples in the batch will have the same number of masked patches. If it is a list, samples in the batch will be randomly masked by numbers defined in the list. This argument is only used for forecast pretraining. channel_consistent_masking (`bool`, *optional*, defaults to `False`): If channel consistent masking is True, all the channels will have the same masking pattern. unmasked_channel_indices (`list`, *optional*): Indices of channels that are not masked during pretraining. Values in the list are number between 1 and `num_input_channels` mask_value (`int`, *optional*, defaults to 0): Values in the masked patches will be filled by `mask_value`. pooling_type (`str`, *optional*, defaults to `"mean"`): Pooling of the embedding. `"mean"`, `"max"` and `None` are supported. head_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for head. prediction_length (`int`, *optional*, defaults to 24): The prediction horizon that the model will output. num_targets (`int`, *optional*, defaults to 1): Number of targets for regression and classification tasks. For classification, it is the number of classes. output_range (`list`, *optional*): Output range for regression task. The range of output values can be set to enforce the model to produce values within a range. num_parallel_samples (`int`, *optional*, defaults to 100): The number of samples is generated in parallel for probabilistic prediction. 
```python >>> from transformers import PatchTSTConfig, PatchTSTModel >>> # Initializing an PatchTST configuration with 12 time steps for prediction >>> configuration = PatchTSTConfig(prediction_length=12) >>> # Randomly initializing a model (with random weights) from the configuration >>> model = PatchTSTModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "patchtst" attribute_map = { "hidden_size": "d_model", "num_attention_heads": "num_attention_heads", "num_hidden_layers": "num_hidden_layers", } def __init__( self, # time series specific configuration num_input_channels: int = 1, context_length: int = 32, distribution_output: str = "student_t", loss: str = "mse", # PatchTST arguments patch_length: int = 1, patch_stride: int = 1, # Transformer architecture configuration num_hidden_layers: int = 3, d_model: int = 128, num_attention_heads: int = 4, share_embedding: bool = True, channel_attention: bool = False, ffn_dim: int = 512, norm_type: str = "batchnorm", norm_eps: float = 1e-05, attention_dropout: float = 0.0, dropout: float = 0.0, positional_dropout: float = 0.0, path_dropout: float = 0.0, ff_dropout: float = 0.0, bias: bool = True, activation_function: str = "gelu", pre_norm: bool = True, positional_encoding_type: str = "sincos", use_cls_token: bool = False, init_std: float = 0.02, share_projection: bool = True, scaling: Optional[Union[str, bool]] = "std", # mask pretraining do_mask_input: Optional[bool] = None, mask_type: str = "random", random_mask_ratio: float = 0.5, num_forecast_mask_patches: Optional[Union[List[int], int]] = [2], channel_consistent_masking: Optional[bool] = False, unmasked_channel_indices: Optional[List[int]] = None, mask_value: int = 0, # head pooling_type: str = "mean", head_dropout: float = 0.0, prediction_length: int = 24, num_targets: int = 1, output_range: Optional[List] = None, # distribution head num_parallel_samples: int = 100, **kwargs, ): # time series specific configuration self.context_length = context_length self.num_input_channels = num_input_channels # n_vars self.loss = loss self.distribution_output = distribution_output self.num_parallel_samples = num_parallel_samples # Transformer architecture configuration self.d_model = d_model self.num_attention_heads = num_attention_heads self.ffn_dim = ffn_dim self.num_hidden_layers = num_hidden_layers self.dropout = dropout self.attention_dropout = attention_dropout self.share_embedding = share_embedding self.channel_attention = channel_attention self.norm_type = norm_type self.norm_eps = norm_eps self.positional_dropout = positional_dropout self.path_dropout = path_dropout self.ff_dropout = ff_dropout self.bias = bias self.activation_function = activation_function self.pre_norm = pre_norm self.positional_encoding_type = positional_encoding_type self.use_cls_token = use_cls_token self.init_std = init_std self.scaling = scaling # PatchTST parameters self.patch_length = patch_length self.patch_stride = patch_stride # Mask pretraining self.do_mask_input = do_mask_input self.mask_type = mask_type self.random_mask_ratio = random_mask_ratio # for random masking self.num_forecast_mask_patches = num_forecast_mask_patches # for forecast masking self.channel_consistent_masking = channel_consistent_masking self.unmasked_channel_indices = unmasked_channel_indices self.mask_value = mask_value # general head params self.pooling_type = pooling_type self.head_dropout = head_dropout # For prediction head self.share_projection = share_projection 
self.prediction_length = prediction_length # For prediction and regression head self.num_parallel_samples = num_parallel_samples # Regression self.num_targets = num_targets self.output_range = output_range super().__init__(**kwargs)
0
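Since the masking options documented in the configuration above only take effect through `PatchTSTForPretraining` (exported in the package `__init__` earlier), here is a hedged sketch of a masked-pretraining setup; the values are illustrative, not recommended defaults:

```python
import torch

from transformers import PatchTSTConfig, PatchTSTForPretraining

config = PatchTSTConfig(
    num_input_channels=7,
    context_length=512,
    patch_length=16,
    patch_stride=16,
    do_mask_input=True,              # turn on patch masking for pretraining
    mask_type="random",              # "random" or "forecast"
    random_mask_ratio=0.4,           # fraction of patches hidden
    channel_consistent_masking=False,
)
model = PatchTSTForPretraining(config)

past_values = torch.randn(8, 512, 7)  # (batch, context_length, num_input_channels)
outputs = model(past_values=past_values)
outputs.loss.backward()  # reconstruction loss on the masked patches
```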
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/starcoder2/modeling_starcoder2.py
# coding=utf-8 # Copyright 2024 BigCode and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Starcoder2 model.""" import inspect import math import warnings from typing import List, Optional, Tuple, Union import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast from ...modeling_utils import PreTrainedModel from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging, replace_return_docstrings, ) from .configuration_starcoder2 import Starcoder2Config if is_flash_attn_2_available(): from flash_attn import flash_attn_func, flash_attn_varlen_func from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters) logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "Starcoder2Config" # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() max_seqlen_in_batch = seqlens_in_batch.max().item() cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) return ( indices, cu_seqlens, max_seqlen_in_batch, ) # Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->Starcoder2 class Starcoder2RotaryEmbedding(nn.Module): def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): super().__init__() self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) # Build here to make `torch.jit.trace` work. 
self._set_cos_sin_cache( seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() ) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq) freqs = torch.outer(t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) def forward(self, x, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] if seq_len > self.max_seq_len_cached: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) return ( self.cos_cached[:seq_len].to(dtype=x.dtype), self.sin_cached[:seq_len].to(dtype=x.dtype), ) # Copied from transformers.models.llama.modeling_llama.rotate_half def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) # Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`): The position indices of the tokens corresponding to the query and key tensors. For example, this can be used to pass offsetted position ids when working with a KV-cache. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
""" cos = cos[position_ids].unsqueeze(unsqueeze_dim) sin = sin[position_ids].unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed class Starcoder2MLP(nn.Module): def __init__(self, config: Starcoder2Config): super().__init__() embed_dim = config.hidden_size self.c_fc = nn.Linear(embed_dim, config.intermediate_size, bias=config.use_bias) self.c_proj = nn.Linear(config.intermediate_size, embed_dim, bias=config.use_bias) self.act = ACT2FN[config.hidden_act] self.residual_dropout = config.residual_dropout def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor: hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.residual_dropout, training=self.training) return hidden_states # Copied from transformers.models.llama.modeling_llama.repeat_kv def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) class Starcoder2Attention(nn.Module): """ Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer and "Generating Long Sequences with Sparse Transformers". """ def __init__(self, config: Starcoder2Config, layer_idx: Optional[int] = None): super().__init__() self.config = config self.layer_idx = layer_idx if layer_idx is None: logger.warning_once( f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." ) self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta self.use_bias = config.use_bias self.is_causal = True self.attention_dropout = config.attention_dropout self.residual_dropout = config.residual_dropout if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads})." 
) self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=self.use_bias) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=self.use_bias) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=self.use_bias) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=self.use_bias) self.rotary_emb = Starcoder2RotaryEmbedding( self.head_dim, max_position_embeddings=self.max_position_embeddings, base=self.rope_theta, ) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if "padding_mask" in kwargs: warnings.warn( "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead." ) bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: if self.layer_idx is None: raise ValueError( f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} " "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class " "with a layer index."
) kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) if past_key_value is not None: cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) # repeat k/v heads if n_kv_heads < n_heads key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): raise ValueError( f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights + attention_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) attn_output = nn.functional.dropout(attn_output, p=self.residual_dropout, training=self.training) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2 with Mistral->Starcoder2 class Starcoder2FlashAttention2(Starcoder2Attention): """ Starcoder2 flash attention module. This module inherits from `Starcoder2Attention` as the weights of the module stay untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() # Ignore copy def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, **kwargs, ): if "padding_mask" in kwargs: warnings.warn( "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" ) # overwrite attention_mask with padding_mask attention_mask = kwargs.pop("padding_mask") bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: if self.layer_idx is None: raise ValueError( f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} " "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class " "with a layer index." ) kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) # Because the input can be padded, the absolute sequence length depends on the max position id. rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1 cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) use_sliding_windows = ( _flash_supports_window_size and getattr(self.config, "sliding_window", None) is not None and kv_seq_len > self.config.sliding_window ) if not _flash_supports_window_size: logger.warning_once( "The current flash attention version does not support sliding window attention, for a more memory efficient implementation" " make sure to upgrade flash-attn library." 
) if past_key_value is not None: # Activate slicing cache only if the config has a value `sliding_windows` attribute cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0 if ( getattr(self.config, "sliding_window", None) is not None and kv_seq_len > self.config.sliding_window and cache_has_contents ): slicing_tokens = 1 - self.config.sliding_window past_key = past_key_value[self.layer_idx][0] past_value = past_key_value[self.layer_idx][1] past_key = past_key[:, :, slicing_tokens:, :].contiguous() past_value = past_value[:, :, slicing_tokens:, :].contiguous() if past_key.shape[-2] != self.config.sliding_window - 1: raise ValueError( f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got" f" {past_key.shape}" ) if attention_mask is not None: attention_mask = attention_mask[:, slicing_tokens:] attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1) cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) # repeat k/v heads if n_kv_heads < n_heads key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) dropout_rate = 0.0 if not self.training else self.attention_dropout # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. Hence, we need # cast them back in float16 just to be sure everything works as expected. input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}." ) query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) # Reashape to the expected shape for Flash Attention query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) attn_output = self._flash_attention_forward( query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate, use_sliding_windows=use_sliding_windows, ) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous() attn_output = self.o_proj(attn_output) attn_output = nn.functional.dropout(attn_output, p=self.residual_dropout, training=self.training) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value def _flash_attention_forward( self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None, use_sliding_windows=False, ): """ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token first unpad the input, then computes the attention scores and pad the final attention scores. 
Args: query_states (`torch.Tensor`): Input query states to be passed to Flash Attention API key_states (`torch.Tensor`): Input key states to be passed to Flash Attention API value_states (`torch.Tensor`): Input value states to be passed to Flash Attention API attention_mask (`torch.Tensor`): The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the position of padding tokens and 1 for the position of non-padding tokens. dropout (`float`): Attention dropout softmax_scale (`float`, *optional*): The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) use_sliding_windows (`bool`, *optional*): Whether to activate sliding window attention. """ if not self._flash_attn_uses_top_left_mask: causal = self.is_causal else: # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__. causal = self.is_causal and query_length != 1 # Contains at least one padding token in the sequence if attention_mask is not None: batch_size = query_states.shape[0] query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( query_states, key_states, value_states, attention_mask, query_length ) cu_seqlens_q, cu_seqlens_k = cu_seq_lens max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens if not use_sliding_windows: attn_output_unpad = flash_attn_varlen_func( query_states, key_states, value_states, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_in_batch_q, max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, causal=causal, ) else: attn_output_unpad = flash_attn_varlen_func( query_states, key_states, value_states, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_in_batch_q, max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, causal=causal, window_size=(self.config.sliding_window, self.config.sliding_window), ) attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) else: if not use_sliding_windows: attn_output = flash_attn_func( query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal, ) else: attn_output = flash_attn_func( query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal, window_size=(self.config.sliding_window, self.config.sliding_window), ) return attn_output def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape # On the first iteration we need to properly re-create the padding mask # by slicing it on the proper place if kv_seq_len != attention_mask.shape[-1]: attention_mask_num_tokens = attention_mask.shape[-1] attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :] indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k) value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k) if query_length == kv_seq_len: query_layer = index_first_axis( query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k ) cu_seqlens_q = cu_seqlens_k max_seqlen_in_batch_q = max_seqlen_in_batch_k indices_q = indices_k elif query_length == 1: max_seqlen_in_batch_q = 1 cu_seqlens_q = torch.arange( batch_size + 1, 
dtype=torch.int32, device=query_layer.device ) # There is a memcpy here, that is very bad. indices_q = cu_seqlens_q[:-1] query_layer = query_layer.squeeze(1) else: # The -q_len: slice assumes left padding. attention_mask = attention_mask[:, -query_length:] query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) return ( query_layer, key_layer, value_layer, indices_q, (cu_seqlens_q, cu_seqlens_k), (max_seqlen_in_batch_q, max_seqlen_in_batch_k), ) # Copied from transformers.models.mistral.modeling_mistral.MistralSdpaAttention with Mistral->Starcoder2 class Starcoder2SdpaAttention(Starcoder2Attention): """ Starcoder2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from `Starcoder2Attention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to SDPA API. """ # Ignore copy def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if output_attentions: # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. logger.warning_once( "Starcoder2Model is using Starcoder2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) return super().forward( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) if past_key_value is not None: cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, # Reference: https://github.com/pytorch/pytorch/issues/112577. 
if query_states.device.type == "cuda" and attention_mask is not None: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() attn_output = torch.nn.functional.scaled_dot_product_attention( query_states, key_states, value_states, attn_mask=attention_mask, dropout_p=self.attention_dropout if self.training else 0.0, # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1. is_causal=self.is_causal and attention_mask is None and q_len > 1, ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) # The difference with Mistral is that here it uses dropout attn_output = nn.functional.dropout(attn_output, p=self.residual_dropout, training=self.training) return attn_output, None, past_key_value STARCODER2_ATTENTION_CLASSES = { "eager": Starcoder2Attention, "flash_attention_2": Starcoder2FlashAttention2, "sdpa": Starcoder2SdpaAttention, } class Starcoder2DecoderLayer(nn.Module): def __init__(self, config: Starcoder2Config, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = STARCODER2_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx) self.mlp = Starcoder2MLP(config) self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon) self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon) # Copied from transformers.models.mistral.modeling_mistral.MistralDecoderLayer.forward def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, **kwargs, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: if "padding_mask" in kwargs: warnings.warn( "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" ) """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs STARCODER2_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`Starcoder2Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ @add_start_docstrings( "The bare Starcoder2 Model outputting raw hidden-states without any specific head on top.", STARCODER2_START_DOCSTRING, ) # Copied from transformers.models.mistral.modeling_mistral.MistralPreTrainedModel with Mistral->Starcoder2 class Starcoder2PreTrainedModel(PreTrainedModel): config_class = Starcoder2Config base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["Starcoder2DecoderLayer"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn_2 = True _supports_sdpa = True _supports_cache_class = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() STARCODER2_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). 
If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*): Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. Two formats are allowed: - a [`~cache_utils.Cache`] instance; - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy cache format. The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the legacy cache format will be returned. If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare Starcoder2 Model outputting raw hidden-states without any specific head on top.", STARCODER2_START_DOCSTRING, ) class Starcoder2Model(Starcoder2PreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`Starcoder2DecoderLayer`] Args: config: Starcoder2Config """ def __init__(self, config: Starcoder2Config): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.embedding_dropout = config.embedding_dropout self.layers = nn.ModuleList( [Starcoder2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self._attn_implementation = config._attn_implementation self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value @add_start_docstrings_to_model_forward(STARCODER2_INPUTS_DOCSTRING) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False past_key_values_length = 0 if use_cache: use_legacy_cache = not isinstance(past_key_values, Cache) if use_legacy_cache: past_key_values = DynamicCache.from_legacy_cache(past_key_values) past_key_values_length = past_key_values.get_usable_length(seq_length) if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0).view(-1, seq_length) else: position_ids = position_ids.view(-1, seq_length).long() if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache: is_padding_right = attention_mask[:, -1].sum().item() != batch_size if is_padding_right: raise ValueError( "You are attempting to perform batched generation with padding_side='right'" " this may lead to unexpected behaviour for Flash Attention version of Starcoder2. Make sure to " " call `tokenizer.padding_side = 'left'` before tokenizing the input. 
" ) if self._attn_implementation == "flash_attention_2": # 2d mask is passed through the layers attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None elif self._attn_implementation == "sdpa" and not output_attentions: # output_attentions=True can not be supported when using SDPA, and we fall back on # the manual implementation that requires a 4D causal mask in all cases. attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length, sliding_window=self.config.sliding_window, ) else: # 4d mask is passed through the layers attention_mask = _prepare_4d_causal_attention_mask( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length, sliding_window=self.config.sliding_window, ) hidden_states = inputs_embeds hidden_states = nn.functional.dropout(hidden_states, p=self.embedding_dropout, training=self.training) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attention_mask, position_ids, past_key_values, output_attentions, use_cache, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache = layer_outputs[2 if output_attentions else 1] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = None if use_cache: next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache if not return_dict: return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, ) # Copied from transformers.models.mistral.modeling_mistral.MistralForCausalLM with MISTRAL->STARCODER2,Mistral-7B-v0.1->starcoder2-7b_16k,Mistral->Starcoder2,mistralai->bigcode class Starcoder2ForCausalLM(Starcoder2PreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) self.model = Starcoder2Model(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @add_start_docstrings_to_model_forward(STARCODER2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: 
Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> from transformers import AutoTokenizer, Starcoder2ForCausalLM >>> model = Starcoder2ForCausalLM.from_pretrained("bigcode/starcoder2-7b_16k") >>> tokenizer = AutoTokenizer.from_pretrained("bigcode/starcoder2-7b_16k") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) logits = logits.float() loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) # Ensure tensors are on the same device shift_labels = shift_labels.to(shift_logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs ): # Omit tokens covered by past_key_values if past_key_values is not None: if isinstance(past_key_values, Cache): cache_length = past_key_values.get_seq_length() past_length = past_key_values.seen_tokens max_cache_length = past_key_values.get_max_length() else: cache_length = past_length = past_key_values[0][0].shape[2] max_cache_length = None # Keep only the unprocessed tokens: # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where # some of the inputs are 
exclusively passed as part of the cache (e.g. when passing input_embeds as # input) if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]: input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :] # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard # input_ids based on the past_length. elif past_length < input_ids.shape[1]: input_ids = input_ids[:, past_length:] # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens. # If we are about to go beyond the maximum cache length, we need to crop the input attention mask. if ( max_cache_length is not None and attention_mask is not None and cache_length + input_ids.shape[1] > max_cache_length ): attention_mask = attention_mask[:, -max_cache_length:] position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1] :] # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, } ) return model_inputs @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past @add_start_docstrings( """ The Starcoder2 Model transformer with a sequence classification head on top (linear layer). [`Starcoder2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do. Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). 
""", STARCODER2_START_DOCSTRING, ) # Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Starcoder2, LLAMA->STARCODER2 class Starcoder2ForSequenceClassification(Starcoder2PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.model = Starcoder2Model(config) self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value @add_start_docstrings_to_model_forward(STARCODER2_INPUTS_DOCSTRING) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, SequenceClassifierOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.model( input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 
"multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, )
mavonic_private_repos/transformers/src/transformers/models/starcoder2/__init__.py
# Copyright 2024 BigCode and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _import_structure = { "configuration_starcoder2": ["STARCODER2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Starcoder2Config"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_starcoder2"] = [ "Starcoder2ForCausalLM", "Starcoder2Model", "Starcoder2PreTrainedModel", "Starcoder2ForSequenceClassification", ] if TYPE_CHECKING: from .configuration_starcoder2 import STARCODER2_PRETRAINED_CONFIG_ARCHIVE_MAP, Starcoder2Config try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_starcoder2 import ( Starcoder2ForCausalLM, Starcoder2ForSequenceClassification, Starcoder2Model, Starcoder2PreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
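# Note (editor's addition, not part of the original file): thanks to the `_LazyModule`
# substitution above, importing this package is cheap — the torch-backed
# `modeling_starcoder2` module is only imported when one of its names is first accessed.
# A hedged sketch of the observable behavior:
#
#     from transformers.models.starcoder2 import Starcoder2Config        # no torch import yet
#     from transformers.models.starcoder2 import Starcoder2ForCausalLM   # triggers the modeling import
#
# If torch is not installed, the second access fails with an informative error at access
# time instead of breaking the package import itself — that is what the
# `OptionalDependencyNotAvailable` try/except blocks arrange.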
mavonic_private_repos/transformers/src/transformers/models/starcoder2/configuration_starcoder2.py
# coding=utf-8 # Copyright 2024 BigCode and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Starcoder2 model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) from ..deprecated._archive_maps import STARCODER2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 class Starcoder2Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Starcoder2Model`]. It is used to instantiate a Starcoder2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [bigcode/starcoder2-7b_16k](https://huggingface.co/bigcode/starcoder2-7b_16k) model. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 49152): Vocabulary size of the Starcoder2 model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`Starcoder2Model`]. hidden_size (`int`, *optional*, defaults to 3072): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 12288): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 30): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 24): Number of attention heads for each attention layer in the Transformer encoder. num_key_value_heads (`int`, *optional*, defaults to 2): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details check out [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `2`. hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 4096): The maximum sequence length that this model might ever be used with. Starcoder2's sliding window attention allows sequences of up to 4096*32 tokens. initializer_range (`float`, *optional*, defaults to 0.018042): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. norm_epsilon (`float`, *optional*, defaults to 1e-05): Epsilon value for the layer norm. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models).
Only relevant if `config.is_decoder=True`. bos_token_id (`int`, *optional*, defaults to 50256): The id of the "beginning-of-sequence" token. eos_token_id (`int`, *optional*, defaults to 50256): The id of the "end-of-sequence" token. rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. sliding_window (`int`, *optional*): Sliding window attention window size. If not specified, will default to `None` (no sliding window). attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. residual_dropout (`float`, *optional*, defaults to 0.0): Residual connection dropout value. embedding_dropout (`float`, *optional*, defaults to 0.0): Embedding dropout. use_bias (`bool`, *optional*, defaults to `True`): Whether to use bias term on linear layers of the model. ```python >>> from transformers import Starcoder2Model, Starcoder2Config >>> # Initializing a Starcoder2 7B style configuration >>> configuration = Starcoder2Config() >>> # Initializing a model from the Starcoder2 7B style configuration >>> model = Starcoder2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "starcoder2" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, vocab_size=49152, hidden_size=3072, intermediate_size=12288, num_hidden_layers=30, num_attention_heads=24, num_key_value_heads=2, hidden_act="gelu_pytorch_tanh", max_position_embeddings=4096, initializer_range=0.018042, norm_epsilon=1e-5, use_cache=True, bos_token_id=50256, eos_token_id=50256, rope_theta=10000.0, sliding_window=None, attention_dropout=0.0, residual_dropout=0.0, embedding_dropout=0.0, use_bias=True, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.sliding_window = sliding_window self.use_bias = use_bias self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.norm_epsilon = norm_epsilon self.use_cache = use_cache self.rope_theta = rope_theta self.attention_dropout = attention_dropout self.residual_dropout = residual_dropout self.embedding_dropout = embedding_dropout super().__init__( bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
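# Editor's note (not part of the original file): with the defaults above, grouped-query
# attention shares each of the 2 key/value heads across 24 / 2 = 12 query heads, so the
# per-layer KV cache holds 2 * head_dim keys and values per token instead of 24 * head_dim
# (head_dim = 3072 / 24 = 128), a 12x reduction. A hedged sketch of overriding these
# fields; the chosen values are purely illustrative:
#
#     from transformers import Starcoder2Config
#
#     config = Starcoder2Config(
#         sliding_window=4096,     # opt in to sliding-window attention (default is None)
#         num_key_value_heads=24,  # equal to num_attention_heads -> plain multi-head attention
#     )
#     assert config.num_attention_heads % config.num_key_value_heads == 0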
mavonic_private_repos/transformers/src/transformers/models/dit/convert_dit_unilm_to_pytorch.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert DiT checkpoints from the unilm repository.""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(config, has_lm_head=False, is_semantic=False): prefix = "backbone." if is_semantic else "" rename_keys = [] for i in range(config.num_hidden_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight")) rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias")) rename_keys.append( (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append( (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight")) rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias")) rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight")) rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias")) rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight")) rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias")) # projection layer + position embeddings rename_keys.extend( [ (f"{prefix}cls_token", "beit.embeddings.cls_token"), (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"), (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"), (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ("mask_token", "beit.embeddings.mask_token"), ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ] ) else: # layernorm + classification head rename_keys.extend( [ ("fc_norm.weight", "beit.pooler.layernorm.weight"), ("fc_norm.bias", "beit.pooler.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys # we split up the matrix of each encoder layer into queries, keys and values def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False): for i in range(config.num_hidden_layers): prefix = "backbone." 
if is_semantic else "" # queries, keys and values in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight") q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias") v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias") state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[ : config.hidden_size, : ] state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[ -config.hidden_size :, : ] state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1") gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2") state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1 state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2 def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False): """ Copy/paste/tweak model's weights to our BEiT structure. """ # define default BEiT configuration has_lm_head = False if "rvlcdip" in checkpoint_url else True config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: config.hidden_size = 1024 config.intermediate_size = 4096 config.num_hidden_layers = 24 config.num_attention_heads = 16 # labels if "rvlcdip" in checkpoint_url: config.num_labels = 16 repo_id = "huggingface/label-files" filename = "rvlcdip-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} # load state_dict of original model, remove and rename some keys state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"] rename_keys = create_rename_keys(config, has_lm_head=has_lm_head) for src, dest in rename_keys: rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head) # load HuggingFace model model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config) model.eval() model.load_state_dict(state_dict) # Check outputs on an image image_processor = BeitImageProcessor( size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False ) image = prepare_img() encoding = image_processor(images=image, return_tensors="pt") pixel_values = encoding["pixel_values"] outputs = model(pixel_values) logits = outputs.logits # verify logits expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192] assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected" Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") 
image_processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: if has_lm_head: model_name = "dit-base" if "base" in checkpoint_url else "dit-large" else: model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip" image_processor.push_to_hub( repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, ) model.push_to_hub( repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True, ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) args = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
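# Example invocation (editor's addition, not part of the original file). The URL is the
# script's own argparse default; the output folder name is illustrative:
#
#     python convert_dit_unilm_to_pytorch.py \
#         --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#         --pytorch_dump_folder_path ./dit-base
#
# Since the default checkpoint URL contains no "rvlcdip", `has_lm_head` is True and the
# converted weights can be reloaded afterwards with the masked-image-modeling head:
#
#     from transformers import BeitForMaskedImageModeling, BeitImageProcessor
#
#     model = BeitForMaskedImageModeling.from_pretrained("./dit-base")
#     processor = BeitImageProcessor.from_pretrained("./dit-base")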
mavonic_private_repos/transformers/src/transformers/models/rwkv/modeling_rwkv.py
# coding=utf-8 # Copyright 2023 Bo Peng and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch RWKV model.""" import math from dataclasses import dataclass from pathlib import Path from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_bitsandbytes_available, is_ninja_available, is_torch_cuda_available, logging, ) from .configuration_rwkv import RwkvConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "RWKV/rwkv-4-169m-pile" _CONFIG_FOR_DOC = "RwkvConfig" from ..deprecated._archive_maps import RWKV_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 rwkv_cuda_kernel = None def load_wkv_cuda_kernel(context_length): from torch.utils.cpp_extension import load as load_kernel global rwkv_cuda_kernel kernel_folder = Path(__file__).resolve().parent.parent.parent / "kernels" / "rwkv" cuda_kernel_files = [kernel_folder / f for f in ["wkv_op.cpp", "wkv_cuda.cu", "wkv_cuda_bf16.cu"]] # Only load the kernel if it's not been loaded yet or if we changed the context length if rwkv_cuda_kernel is not None and rwkv_cuda_kernel.max_seq_length == context_length: return logger.info(f"Loading CUDA kernel for RWKV at context length of {context_length}.") flags = [ "-res-usage", "--maxrregcount 60", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-DTmax={context_length}", ] rwkv_cuda_kernel = load_kernel( name=f"wkv_{context_length}", sources=cuda_kernel_files, verbose=(logging.get_verbosity() == logging.DEBUG), extra_cuda_cflags=flags, ) rwkv_cuda_kernel.max_seq_length = context_length class RwkvLinearAttention(torch.autograd.Function): @staticmethod def forward(ctx, time_decay, time_first, key, value, state=None, return_state=False): batch_size, seq_len, hidden_size = key.size() if seq_len > rwkv_cuda_kernel.max_seq_length: raise ValueError( f"Cannot process a batch with {seq_len} tokens at the same time, use a maximum of " f"{rwkv_cuda_kernel.max_seq_length} with this model." ) if batch_size * hidden_size % min(hidden_size, 32) != 0: raise ValueError( f"The product of batch size ({batch_size}) and hidden size ({hidden_size}) needs to be a round " f"multiple of {min(hidden_size, 32)}." 
) ctx.input_dtype = key.dtype if ( time_decay.device.type != "cuda" or time_first.device.type != "cuda" or key.device.type != "cuda" or value.device.type != "cuda" ): raise ValueError("Calling the CUDA kernel for wkv attention requires all tensors to be on CUDA devices.") time_decay = -torch.exp(time_decay.float().contiguous()) if key.dtype == torch.float16: time_first = time_first.float() key = key.float() value = value.float() time_first = time_first.contiguous() key = key.contiguous() value = value.contiguous() # The CUDA kernel will fill this tensor. output = torch.empty_like(key, memory_format=torch.contiguous_format) if return_state or state is not None: if state is None: state = torch.zeros( batch_size, hidden_size, 3, dtype=torch.float32, device=key.device, memory_format=torch.contiguous_format, ) state[:, :, 2] -= 1e38 else: state = torch.cat([s.unsqueeze(2) for s in state], dim=2).contiguous() if key.dtype == torch.bfloat16: forward_func = rwkv_cuda_kernel.forward_with_state_bf16 else: forward_func = rwkv_cuda_kernel.forward_with_state forward_func(time_decay, time_first, key, value, output, state) else: forward_func = rwkv_cuda_kernel.forward_bf16 if key.dtype == torch.bfloat16 else rwkv_cuda_kernel.forward forward_func(time_decay, time_first, key, value, output) ctx.save_for_backward(time_decay, time_first, key, value, output) if state is not None: state = [s.squeeze(2) for s in torch.chunk(state, 3, dim=2)] return output.to(ctx.input_dtype), state @staticmethod # g stands for grad def backward(ctx, g_output, g_state=None): input_dtype = ctx.input_dtype time_decay, time_first, key, value, output = ctx.saved_tensors # The CUDA kernel will fill those tensors. g_time_decay = torch.empty_like( time_decay, memory_format=torch.contiguous_format, dtype=torch.bfloat16 if input_dtype == torch.bfloat16 else torch.float32, ) g_time_first = torch.empty_like(time_first, memory_format=torch.contiguous_format) g_key = torch.empty_like(key, memory_format=torch.contiguous_format) g_value = torch.empty_like(value, memory_format=torch.contiguous_format) if input_dtype == torch.float16: g_output = g_output.float() backward_func = rwkv_cuda_kernel.backward_bf16 if input_dtype == torch.bfloat16 else rwkv_cuda_kernel.backward backward_func( time_decay, time_first, key, value, output, g_output.contiguous(), g_time_decay, g_time_first, g_key, g_value, ) return ( g_time_decay.to(input_dtype), g_time_first.to(input_dtype), g_key.to(input_dtype), g_value.to(input_dtype), None, None, ) def rwkv_linear_attention_cpu(time_decay, time_first, key, value, state=None, return_state=False): # For CPU fallback. Will be slower and probably take more memory than the custom CUDA kernel if not executed # within a torch.no_grad. 
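    # Editor's annotation (added comment, no behavior change): the loop below evaluates
    # the RWKV-4 "WKV" recurrence
    #     wkv_t = (sum_{i<t} exp(k_i - (t-1-i)*w) * v_i + exp(u + k_t) * v_t)
    #             / (sum_{i<t} exp(k_i - (t-1-i)*w) + exp(u + k_t))
    # where w = exp(`time_decay`) > 0 is the per-channel decay (the code negates its exp
    # so the decay enters additively in log space) and u = `time_first` is the bonus for
    # the current token. For numerical stability the running numerator (`num_state`),
    # denominator (`den_state`) and maximum exponent (`max_state`) are kept factored, and
    # `max_state` is subtracted before every exp() — the same idea as the log-sum-exp trick.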
_, seq_length, _ = key.size() output = torch.zeros_like(key) if state is None: num_state = torch.zeros_like(key[:, 0], dtype=torch.float32) den_state = torch.zeros_like(key[:, 0], dtype=torch.float32) max_state = torch.zeros_like(key[:, 0], dtype=torch.float32) - 1e38 else: num_state, den_state, max_state = state # For numerical stability # real_numerator_state = num_state * torch.exp(max_state) # real_denominator_state = den_state * torch.exp(max_state) time_decay = -torch.exp(time_decay) for current_index in range(seq_length): current_key = key[:, current_index].float() current_value = value[:, current_index] # wkv computation at time t max_for_output = torch.maximum(max_state, current_key + time_first) e1 = torch.exp(max_state - max_for_output) e2 = torch.exp(current_key + time_first - max_for_output) numerator = e1 * num_state + e2 * current_value denominator = e1 * den_state + e2 output[:, current_index] = (numerator / denominator).to(output.dtype) # Update state for next iteration max_for_state = torch.maximum(max_state + time_decay, current_key) e1 = torch.exp(max_state + time_decay - max_for_state) e2 = torch.exp(current_key - max_for_state) num_state = e1 * num_state + e2 * current_value den_state = e1 * den_state + e2 max_state = max_for_state if return_state or state is not None: state = [num_state, den_state, max_state] return output, state def rwkv_linear_attention(time_decay, time_first, key, value, state=None, return_state=False): no_cuda = any(t.device.type != "cuda" for t in [time_decay, time_first, key, value]) # Launching the CUDA kernel for just one token will actually be slower (there is no for loop in the CPU version # in this case). one_token = key.size(1) == 1 if rwkv_cuda_kernel is None or no_cuda or one_token: return rwkv_linear_attention_cpu(time_decay, time_first, key, value, state=state, return_state=return_state) else: return RwkvLinearAttention.apply(time_decay, time_first, key, value, state, return_state) class RwkvSelfAttention(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.config = config kernel_loaded = rwkv_cuda_kernel is not None and rwkv_cuda_kernel.max_seq_length == config.context_length if is_ninja_available() and is_torch_cuda_available() and not kernel_loaded: try: load_wkv_cuda_kernel(config.context_length) except Exception: logger.info("Could not load the custom CUDA kernel for RWKV attention.") self.layer_id = layer_id hidden_size = config.hidden_size attention_hidden_size = ( config.attention_hidden_size if config.attention_hidden_size is not None else hidden_size ) self.attention_hidden_size = attention_hidden_size self.time_decay = nn.Parameter(torch.empty(attention_hidden_size)) self.time_first = nn.Parameter(torch.empty(attention_hidden_size)) self.time_mix_key = nn.Parameter(torch.empty(1, 1, hidden_size)) self.time_mix_value = nn.Parameter(torch.empty(1, 1, hidden_size)) self.time_mix_receptance = nn.Parameter(torch.empty(1, 1, hidden_size)) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) self.key = nn.Linear(hidden_size, attention_hidden_size, bias=False) self.value = nn.Linear(hidden_size, attention_hidden_size, bias=False) self.receptance = nn.Linear(hidden_size, attention_hidden_size, bias=False) self.output = nn.Linear(attention_hidden_size, hidden_size, bias=False) # TODO: maybe jit, otherwise move inside forward def extract_key_value(self, hidden, state=None): # Mix hidden with the previous timestep to produce key, value, receptance if hidden.size(1) == 1 and state is not None: shifted = state[1][:, :, 
self.layer_id] else: shifted = self.time_shift(hidden) if state is not None: shifted[:, 0] = state[1][:, :, self.layer_id] key = hidden * self.time_mix_key + shifted * (1 - self.time_mix_key) value = hidden * self.time_mix_value + shifted * (1 - self.time_mix_value) receptance = hidden * self.time_mix_receptance + shifted * (1 - self.time_mix_receptance) key = self.key(key) value = self.value(value) receptance = torch.sigmoid(self.receptance(receptance)) if state is not None: state[1][:, :, self.layer_id] = hidden[:, -1] return receptance, key, value, state def forward(self, hidden, state=None, use_cache=False): receptance, key, value, state = self.extract_key_value(hidden, state=state) layer_state = tuple(s[:, :, self.layer_id] for s in state[2:]) if state is not None else None rwkv, layer_state = rwkv_linear_attention( self.time_decay, self.time_first, key, value, state=layer_state, return_state=use_cache, ) if layer_state is not None: state[2][:, :, self.layer_id] = layer_state[0] state[3][:, :, self.layer_id] = layer_state[1] state[4][:, :, self.layer_id] = layer_state[2] return self.output(receptance * rwkv), state class RwkvFeedForward(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.config = config self.layer_id = layer_id hidden_size = config.hidden_size intermediate_size = ( config.intermediate_size if config.intermediate_size is not None else 4 * config.hidden_size ) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) self.time_mix_key = nn.Parameter(torch.empty(1, 1, hidden_size)) self.time_mix_receptance = nn.Parameter(torch.empty(1, 1, hidden_size)) self.key = nn.Linear(hidden_size, intermediate_size, bias=False) self.receptance = nn.Linear(hidden_size, hidden_size, bias=False) self.value = nn.Linear(intermediate_size, hidden_size, bias=False) def forward(self, hidden, state=None): if hidden.size(1) == 1 and state is not None: shifted = state[0][:, :, self.layer_id] else: shifted = self.time_shift(hidden) if state is not None: shifted[:, 0] = state[0][:, :, self.layer_id] key = hidden * self.time_mix_key + shifted * (1 - self.time_mix_key) receptance = hidden * self.time_mix_receptance + shifted * (1 - self.time_mix_receptance) key = torch.square(torch.relu(self.key(key))) value = self.value(key) receptance = torch.sigmoid(self.receptance(receptance)) if state is not None: state[0][:, :, self.layer_id] = hidden[:, -1] return receptance * value, state class RwkvBlock(nn.Module): def __init__(self, config, layer_id): super().__init__() self.config = config self.layer_id = layer_id if layer_id == 0: self.pre_ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.ln1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.ln2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.attention = RwkvSelfAttention(config, layer_id) self.feed_forward = RwkvFeedForward(config, layer_id) def forward(self, hidden, state=None, use_cache=False, output_attentions=False): if self.layer_id == 0: hidden = self.pre_ln(hidden) attention, state = self.attention(self.ln1(hidden), state=state, use_cache=use_cache) hidden = hidden + attention feed_forward, state = self.feed_forward(self.ln2(hidden), state=state) hidden = hidden + feed_forward outputs = (hidden, state) if output_attentions: outputs += (attention,) else: outputs += (None,) return outputs class RwkvPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = RwkvConfig base_model_prefix = "rwkv" _no_split_modules = ["RwkvBlock"] _keep_in_fp32_modules = ["time_decay", "time_first"] supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, RwkvSelfAttention): layer_id = module.layer_id num_hidden_layers = module.config.num_hidden_layers hidden_size = module.config.hidden_size attention_hidden_size = module.attention_hidden_size ratio_0_to_1 = layer_id / (num_hidden_layers - 1) # 0 to 1 ratio_1_to_almost0 = 1.0 - (layer_id / num_hidden_layers) # 1 to ~0 time_weight = torch.tensor( [i / hidden_size for i in range(hidden_size)], dtype=module.time_mix_key.dtype, device=module.time_mix_key.device, ) time_weight = time_weight[None, None, :] decay_speed = [ -5 + 8 * (h / (attention_hidden_size - 1)) ** (0.7 + 1.3 * ratio_0_to_1) for h in range(attention_hidden_size) ] decay_speed = torch.tensor(decay_speed, dtype=module.time_decay.dtype, device=module.time_decay.device) zigzag = ( torch.tensor( [(i + 1) % 3 - 1 for i in range(attention_hidden_size)], dtype=module.time_first.dtype, device=module.time_first.device, ) * 0.5 ) with torch.no_grad(): module.time_decay.data = decay_speed module.time_first.data = torch.ones_like(module.time_first * math.log(0.3) + zigzag) module.time_mix_key.data = torch.pow(time_weight, ratio_1_to_almost0) module.time_mix_value.data = torch.pow(time_weight, ratio_1_to_almost0) + 0.3 * ratio_0_to_1 module.time_mix_receptance.data = torch.pow(time_weight, 0.5 * ratio_1_to_almost0) elif isinstance(module, RwkvFeedForward): layer_id = module.layer_id num_hidden_layers = module.config.num_hidden_layers hidden_size = module.config.hidden_size ratio_1_to_almost0 = 1.0 - (layer_id / num_hidden_layers) # 1 to ~0 time_weight = torch.tensor( [i / hidden_size for i in range(hidden_size)], dtype=module.time_mix_key.dtype, device=module.time_mix_key.device, ) time_weight = time_weight[None, None, :] with torch.no_grad(): module.time_mix_key.data = torch.pow(time_weight, ratio_1_to_almost0) module.time_mix_receptance.data = torch.pow(time_weight, ratio_1_to_almost0) @dataclass class RwkvOutput(ModelOutput): """ Class for the RWKV model outputs. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. state (list of five `torch.FloatTensor` of shape `(batch_size, hidden_size, num_hidden_layers)`): The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to avoid providing the old `input_ids`. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" last_hidden_state: torch.FloatTensor = None state: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class RwkvCausalLMOutput(ModelOutput): """ Base class for causal language model (or autoregressive) outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). state (list of five `torch.FloatTensor` of shape `(batch_size, hidden_size, num_hidden_layers)`): The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to avoid providing the old `input_ids`. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None state: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None RWKV_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`RwkvConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ RWKV_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            This is currently not used by `RwkvModel`, but will be supported in the future.

            [What are attention masks?](../glossary#attention-mask)
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        state (tuple of five `torch.FloatTensor` of shape `(batch_size, hidden_size, num_hidden_layers)`, *optional*):
            If passed along, the model uses the previous state in all the blocks (which will give the output for the
            `input_ids` provided as if the model added `state_input_ids + input_ids` as context).
        use_cache (`bool`, *optional*):
            If set to `True`, the last state is returned and can be used to quickly generate the next logits.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RWKV Model transformer outputting raw hidden-states without any specific head on top.",
    RWKV_START_DOCSTRING,
)
class RwkvModel(RwkvPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        self.blocks = nn.ModuleList([RwkvBlock(config, layer_id=idx) for idx in range(config.num_hidden_layers)])
        self.ln_out = nn.LayerNorm(config.hidden_size)

        self.layers_are_rescaled = False
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, new_embeddings):
        self.embeddings = new_embeddings

    @add_start_docstrings_to_model_forward(RWKV_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=RwkvOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,  # noqa
        inputs_embeds: Optional[torch.FloatTensor] = None,
        state: Optional[List[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, RwkvOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.training == self.layers_are_rescaled:
            self._rescale_layers()

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)

        if use_cache and state is None:
            shape = (inputs_embeds.size(0), self.config.hidden_size, self.config.num_hidden_layers)
            state = [
                torch.zeros(
                    *shape, dtype=inputs_embeds.dtype if i <= 1 else torch.float32, device=inputs_embeds.device
                )
                for i in range(5)
            ]
            state[4] -= 1e30

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        hidden_states = inputs_embeds

        all_self_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None
        for idx, block in enumerate(self.blocks):
            if self.gradient_checkpointing and self.training:
                hidden_states, state, attentions = self._gradient_checkpointing_func(
                    block.__call__, hidden_states, state, use_cache, output_attentions
                )
            else:
                hidden_states, state, attentions = block(
                    hidden_states, state=state, use_cache=use_cache, output_attentions=output_attentions
                )
            if (
                self.layers_are_rescaled
                and self.config.rescale_every > 0
                and (idx + 1) % self.config.rescale_every == 0
            ):
                hidden_states = hidden_states / 2

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if output_attentions:
                all_self_attentions = all_self_attentions + (attentions,)

        hidden_states = self.ln_out(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(x for x in [hidden_states, state, all_hidden_states, all_self_attentions] if x is not None)

        return RwkvOutput(
            last_hidden_state=hidden_states,
            state=state,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )

    def _rescale_layers(self):
        # Layers should be rescaled for inference only.
        if self.layers_are_rescaled == (not self.training):
            return
        if self.config.rescale_every > 0:
            with torch.no_grad():
                for block_id, block in enumerate(self.blocks):
                    if self.training:
                        block.attention.output.weight.mul_(2 ** int(block_id // self.config.rescale_every))
                        block.feed_forward.value.weight.mul_(2 ** int(block_id // self.config.rescale_every))
                    else:
                        # Deal with quantization statistics
                        if hasattr(block.attention.output.weight, "SCB"):
                            block.attention.output.weight.SCB.div_(2 ** int(block_id // self.config.rescale_every))
                            block.feed_forward.value.weight.SCB.div_(2 ** int(block_id // self.config.rescale_every))
                        elif hasattr(block.attention.output.weight, "quant_state"):
                            self._bnb_4bit_dequantize_and_rescale(block.attention.output, block_id)
                            self._bnb_4bit_dequantize_and_rescale(block.feed_forward.value, block_id)
                        else:
                            block.attention.output.weight.div_(2 ** int(block_id // self.config.rescale_every))
                            block.feed_forward.value.weight.div_(2 ** int(block_id // self.config.rescale_every))

        self.layers_are_rescaled = not self.training

    def _bnb_4bit_dequantize_and_rescale(self, target_layer, block_id):
        r"""
        Perform the dequantization and rescaling of the weights of a given layer. After that operation the layer will
        be quantized again.
""" if not is_bitsandbytes_available(): raise ImportError("Please install bitsandbytes to use this method.") import bitsandbytes as bnb dequant_weights = bnb.functional.dequantize_4bit(target_layer.weight.data, target_layer.weight.quant_state) dequant_weights.div_(2 ** int(block_id // self.config.rescale_every)) # re-quantize the model: # we need to put it first on CPU then back to the device # this will create an overhead :/ # We set requires_grad=False as we cannot compute gradients on top of 4bit parameters anyway and to avoid # bugs with bnb quant_weight = bnb.nn.Params4bit(dequant_weights.to("cpu"), requires_grad=False).to(dequant_weights.device) setattr(target_layer, "weight", quant_weight) @add_start_docstrings( """ The RWKV Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, RWKV_START_DOCSTRING, ) class RwkvForCausalLM(RwkvPreTrainedModel): _tied_weights_keys = ["head.weight"] def __init__(self, config): super().__init__(config) self.rwkv = RwkvModel(config) self.head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.head def set_output_embeddings(self, new_embeddings): self.head = new_embeddings def generate(self, *args, **kwargs): # Thin wrapper to raise exceptions when trying to generate with methods that manipulate `past_key_values`. # RWKV is one of the few models that don't have it (it has `state` instead, which has different properties and # usage). try: gen_output = super().generate(*args, **kwargs) except AttributeError as exc: # Expected exception: "AttributeError: '(object name)' object has no attribute 'past_key_values'" if "past_key_values" in str(exc): raise AttributeError( "You tried to call `generate` with a decoding strategy that manipulates `past_key_values`. RWKV " "doesn't have that attribute, try another generation strategy instead. For the available " "generation strategies, check this doc: https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies" ) else: raise exc return gen_output def prepare_inputs_for_generation(self, input_ids, state=None, inputs_embeds=None, **kwargs): # only last token for inputs_ids if the state is passed along. if state is not None: input_ids = input_ids[:, -1].unsqueeze(-1) # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and state is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs["state"] = state return model_inputs @add_start_docstrings_to_model_forward(RWKV_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=RwkvCausalLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, # noqa inputs_embeds: Optional[torch.FloatTensor] = None, state: Optional[List[torch.FloatTensor]] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, RwkvCausalLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set
            to `-100` are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        rwkv_outputs = self.rwkv(
            input_ids,
            inputs_embeds=inputs_embeds,
            state=state,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = rwkv_outputs[0]

        logits = self.head(hidden_states)

        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(logits.device)
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

        if not return_dict:
            output = (logits,) + rwkv_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return RwkvCausalLMOutput(
            loss=loss,
            logits=logits,
            state=rwkv_outputs.state,
            hidden_states=rwkv_outputs.hidden_states,
            attentions=rwkv_outputs.attentions,
        )
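

# A minimal usage sketch: with RWKV, the `state` returned by one forward pass plays the
# role `past_key_values` plays in attention models, so a follow-up call only needs the new
# tokens. The checkpoint name is the small Pile model referenced in the RwkvConfig docs.
if __name__ == "__main__":
    from transformers import AutoTokenizer, RwkvForCausalLM

    tokenizer = AutoTokenizer.from_pretrained("RWKV/rwkv-4-169m-pile")
    model = RwkvForCausalLM.from_pretrained("RWKV/rwkv-4-169m-pile")

    inputs = tokenizer("Hello, my", return_tensors="pt")
    outputs = model(inputs["input_ids"], use_cache=True)

    # Greedily pick the next token and continue from the cached state: only the single new
    # token is passed, not the whole prefix.
    next_token = outputs.logits[:, -1].argmax(-1, keepdim=True)
    outputs = model(next_token, state=outputs.state, use_cache=True)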
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/rwkv/configuration_rwkv.py
# coding=utf-8
# Copyright 2023 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" RWKV configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class RwkvConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`RwkvModel`]. It is used to instantiate a RWKV
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the RWKV-4
    [RWKV/rwkv-4-169m-pile](https://huggingface.co/RWKV/rwkv-4-169m-pile) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50277):
            Vocabulary size of the RWKV model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`RwkvModel`].
        context_length (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model can be used with in a single forward (using it in RNN mode
            lets you use any sequence length).
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimensionality of the embeddings and hidden states.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the model.
        attention_hidden_size (`int`, *optional*):
            Dimensionality of the attention hidden states. Will default to `hidden_size` if unset.
        intermediate_size (`int`, *optional*):
            Dimensionality of the inner feed-forward layers. Will default to 4 times `hidden_size` if unset.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
            The epsilon to use in the layer normalization layers.
        bos_token_id (`int`, *optional*, defaults to 0):
            The id of the beginning of sentence token in the vocabulary. Defaults to 0 as RWKV uses the same tokenizer
            as GPTNeoX.
        eos_token_id (`int`, *optional*, defaults to 0):
            The id of the end of sentence token in the vocabulary. Defaults to 0 as RWKV uses the same tokenizer as
            GPTNeoX.
        rescale_every (`int`, *optional*, defaults to 6):
            At inference, the hidden states (and weights of the corresponding output layers) are divided by 2 every
            `rescale_every` layer. If set to 0 or a negative number, no rescale is done.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether or not to tie the word embeddings with the input token embeddings.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last state.
Example: ```python >>> from transformers import RwkvConfig, RwkvModel >>> # Initializing a Rwkv configuration >>> configuration = RwkvConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = RwkvModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "rwkv" attribute_map = {"max_position_embeddings": "context_length"} def __init__( self, vocab_size=50277, context_length=1024, hidden_size=4096, num_hidden_layers=32, attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5, bos_token_id=0, eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs, ): self.vocab_size = vocab_size self.context_length = context_length self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size self.layer_norm_epsilon = layer_norm_epsilon self.rescale_every = rescale_every self.use_cache = use_cache self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id super().__init__( tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
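

# A short illustrative sketch of the derived defaults above: leaving `attention_hidden_size`
# and `intermediate_size` unset ties them to `hidden_size`, and `attribute_map` exposes
# `context_length` under the standard `max_position_embeddings` name.
if __name__ == "__main__":
    config = RwkvConfig(hidden_size=512, context_length=2048)
    assert config.attention_hidden_size == 512
    assert config.intermediate_size == 4 * 512
    assert config.max_position_embeddings == 2048  # aliased to `context_length`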
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/rwkv/__init__.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_rwkv": ["RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP", "RwkvConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rwkv"] = [
        "RWKV_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RwkvForCausalLM",
        "RwkvModel",
        "RwkvPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_rwkv import RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP, RwkvConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rwkv import (
            RWKV_PRETRAINED_MODEL_ARCHIVE_LIST,
            RwkvForCausalLM,
            RwkvModel,
            RwkvPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
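

# Illustrative note: with the `_LazyModule` wiring above, submodules are imported only on
# first attribute access. For example (run from an environment with transformers installed):
#
#   from transformers.models.rwkv import RwkvConfig  # configuration module resolved lazily
#   print(RwkvConfig().model_type)  # -> "rwkv"
#
# so the configuration stays importable even before the torch-backed modeling module loads.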
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/rwkv/convert_rwkv_checkpoint_to_hf.py
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert a RWKV checkpoint from BlinkDL to the Hugging Face format."""


import argparse
import gc
import json
import os
import re

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint


NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}


def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict


def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may raise an OOM error; if this is the case, don't worry, you have still converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
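

# Example invocation (repo and checkpoint names are illustrative; any BlinkDL RWKV-4
# release whose file name carries a size tag works, since `--size` is then inferred):
#
#   python convert_rwkv_checkpoint_to_hf.py \
#       --repo_id BlinkDL/rwkv-4-pile-430m \
#       --checkpoint_file RWKV-4-Pile-430M-20220808-8066.pth \
#       --output_dir ./rwkv-430m-hf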
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/fuyu/convert_fuyu_model_weights_to_hf.py
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import sys
import warnings

import flatdict
import torch

from transformers import FuyuConfig, FuyuForCausalLM, LlamaTokenizer


try:
    from transformers import LlamaTokenizerFast

    tokenizer_class = LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast tokenizer, update your `tokenizers`"
        " library and re-run the tokenizer conversion."
    )
    tokenizer_class = LlamaTokenizer

"""
Sample usage: # TODO fix clone links from persimmon to fuyu

```
git clone https://github.com/adept-ai-labs/adept-inference
wget https://axtkn4xl5cip.objectstorage.us-phoenix-1.oci.customer-oci.com/n/axtkn4xl5cip/b/adept-public-data/o/8b_base_model_release.tar
wget https://axtkn4xl5cip.objectstorage.us-phoenix-1.oci.customer-oci.com/n/axtkn4xl5cip/b/adept-public-data/o/8b_chat_model_release.tar
python src/transformers/models/fuyu/convert_fuyu_model_weights_to_hf.py --input_dir /path/to/downloaded/fuyu/weights/ --output_dir /output/path
```

Thereafter, models can be loaded via:

```py
from transformers import FuyuForCausalLM, FuyuTokenizer

model = FuyuForCausalLM.from_pretrained("/output/path")
tokenizer = FuyuTokenizer.from_pretrained("/output/path")
```

Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest
versions come in several checkpoints, they each contain a part of each weight of the model, so we need to load them
all in RAM).
""" KEYS_TO_MODIFY_MAPPING = { "self_attention": "self_attn", "language_model.encoder": "language_model.model", "word_embeddings_for_head": "language_model.lm_head", "language_model.embedding.word_embeddings": "language_model.model.embed_tokens", "vit_encoder.linear_encoder": "vision_embed_tokens", } KEYS_TO_REMOVE = { "rotary_emb.inv_freq", "image_patch_projection", "image_patch_projection.weight", "image_patch_projection.bias", } def rename_state_dict(state_dict): model_state_dict = {} for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: key = key.replace(key_to_modify, new_key) # if KEYS_TO_REMOVE in key: if key in KEYS_TO_REMOVE: continue model_state_dict[key] = value return model_state_dict def convert_fuyu_checkpoint(pytorch_dump_folder_path, ada_lib_path, pt_model_path, safe_serialization=False): sys.path.insert(0, ada_lib_path) model_state_dict_base = torch.load(pt_model_path, map_location="cpu") state_dict = flatdict.FlatDict(model_state_dict_base["model"], ".") state_dict = rename_state_dict(state_dict) transformers_config = FuyuConfig() model = FuyuForCausalLM(transformers_config).to(torch.bfloat16) model.load_state_dict(state_dict) model.save_pretrained(pytorch_dump_folder_path, safe_serialization=safe_serialization) transformers_config.save_pretrained(pytorch_dump_folder_path) def main(): parser = argparse.ArgumentParser() parser.add_argument( "--input_dir", help="Location of Fuyu weights, which contains tokenizer.model and model folders", ) parser.add_argument( "--pt_model_path", help="Location of Fuyu `model_optim_rng.pt`", ) parser.add_argument( "--output_dir", help="Location to write HF model and tokenizer", ) parser.add_argument( "--ada_lib_path", help="Location of original source code from adept to deserialize .pt checkpoint", ) parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.") args = parser.parse_args() spm_path = os.path.join(args.input_dir, "adept_vocab.model") convert_fuyu_checkpoint( pytorch_dump_folder_path=args.output_dir, pt_model_path=args.pt_model_path, safe_serialization=args.safe_serialization, ada_lib_path=args.ada_lib_path, ) tokenizer = tokenizer_class(spm_path, bos_token="|ENDOFTEXT|", eos_token="|ENDOFTEXT|") tokenizer.save_pretrained(args.output_dir) if __name__ == "__main__": main()
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/fuyu/configuration_fuyu.py
# coding=utf-8
# Copyright 2023 Adept AI and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fuyu model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class FuyuConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`FuyuForCausalLM`]. It is used to instantiate a
    Fuyu model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the
    [adept/fuyu-8b](https://huggingface.co/adept/fuyu-8b).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 262144):
            Vocabulary size of the Fuyu model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`FuyuForCausalLM`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 16384):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 36):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 64):
            Number of attention heads for each attention layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"relu2"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 16384):
            The maximum sequence length that this model might ever be used with.
        image_size (`int`, *optional*, defaults to 300):
            The input image size.
        patch_size (`int`, *optional*, defaults to 30):
            The input vision transformer encoding patch size.
        num_channels (`int`, *optional*, defaults to 3):
            The input image number of channels.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie input and output embeddings.
        rope_theta (`float`, *optional*, defaults to 25000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
            `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
            these scaling strategies behave:
            https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
            experimental feature, subject to breaking API changes in future versions.
        qk_layernorm (`bool`, *optional*, defaults to `True`):
            Whether or not to normalize the Queries and Keys after projecting the hidden states
        hidden_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio after applying the MLP to the hidden states.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio after computing the attention scores.
        partial_rotary_factor (`float`, *optional*, defaults to 0.5):
            Percentage of the query and keys which will have rotary embedding.
        pad_token_id (`int`, *optional*):
            The id of the *padding* token.
        bos_token_id (`int`, *optional*, defaults to 1):
            The id of the *beginning-of-sequence* token.
        eos_token_id (`Union[int, List[int]]`, *optional*, defaults to 2):
            The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize the text model. Defaults to a `persimmon`
            configuration built from the arguments above.

    ```python
    >>> from transformers import FuyuConfig

    >>> # Initializing a Fuyu fuyu-8b style configuration
    >>> configuration = FuyuConfig()
    ```"""

    model_type = "fuyu"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=262144,
        hidden_size=4096,
        intermediate_size=16384,
        num_hidden_layers=36,
        num_attention_heads=64,
        hidden_act="relu2",
        max_position_embeddings=16384,
        image_size=300,
        patch_size=30,
        num_channels=3,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=25000.0,
        rope_scaling=None,
        qk_layernorm=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        partial_rotary_factor=0.5,
        pad_token_id=None,
        bos_token_id=1,
        eos_token_id=2,
        text_config=None,
        **kwargs,
    ):
        if text_config is None:
            text_config = {
                "vocab_size": vocab_size,
                "max_position_embeddings": max_position_embeddings,
                "hidden_size": hidden_size,
                "intermediate_size": intermediate_size,
                "num_hidden_layers": num_hidden_layers,
                "num_attention_heads": num_attention_heads,
                "hidden_act": hidden_act,
                "initializer_range": initializer_range,
                "layer_norm_eps": layer_norm_eps,
                "use_cache": use_cache,
                "rope_theta": rope_theta,
                "rope_scaling": rope_scaling,
                "qk_layernorm": qk_layernorm,
                "hidden_dropout": hidden_dropout,
                "attention_dropout": attention_dropout,
                "partial_rotary_factor": partial_rotary_factor,
                "pad_token_id": pad_token_id,
                "bos_token_id": bos_token_id,
                "eos_token_id": eos_token_id,
                "tie_word_embeddings": tie_word_embeddings,
            }
            logger.info("text_config is None. Initializing the text model with default values.")

        text_model_type = text_config["model_type"] if "model_type" in text_config else "persimmon"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.qk_layernorm = qk_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.partial_rotary_factor = partial_rotary_factor
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation
    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
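

# A small illustrative check of the `rope_scaling` contract enforced above: the dict must
# hold exactly a `type` in {"linear", "dynamic"} and a float `factor` strictly greater
# than 1.
if __name__ == "__main__":
    FuyuConfig(rope_scaling={"type": "linear", "factor": 2.0})  # valid

    try:
        FuyuConfig(rope_scaling={"type": "linear", "factor": 0.5})  # factor must be > 1
    except ValueError as err:
        print(err)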
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/fuyu/modeling_fuyu.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Fuyu model."""
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...modeling_outputs import CausalLMOutputWithPast
from ...modeling_utils import PreTrainedModel
from ...models.auto.modeling_auto import AutoModelForCausalLM
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_fuyu import FuyuConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "FuyuConfig"


FUYU_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`FuyuConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings(
    "The bare Fuyu Model outputting raw hidden-states without any specific head on top.",
    FUYU_START_DOCSTRING,
)
class FuyuPreTrainedModel(PreTrainedModel):
    config_class = FuyuConfig
    base_model_prefix = "fuyu"
    supports_gradient_checkpointing = True
    _no_split_modules = []
    _skip_keys_device_placement = "past_key_values"

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


FUYU_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        image_patches (`torch.FloatTensor` of shape `(batch_size, num_total_patches, patch_size x patch_size x num_channels)`, *optional*):
            Image patches to be used as continuous embeddings. The patches are flattened and then projected to the
            hidden size of the model.
        image_patches_indices (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices indicating at which position in `inputs_embeds` the `image_patches` have to be inserted (positions
            holding text tokens are marked with -1).
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
""" @add_start_docstrings( "Fuyu Model with a language modeling head on top for causal language model conditioned on image patches and text.", FUYU_START_DOCSTRING, ) class FuyuForCausalLM(FuyuPreTrainedModel): def __init__(self, config: FuyuConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.language_model = AutoModelForCausalLM.from_config( config.text_config, attn_implementation=config._attn_implementation ) self.vision_embed_tokens = nn.Linear( config.patch_size * config.patch_size * config.num_channels, config.hidden_size ) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def gather_continuous_embeddings( self, word_embeddings: torch.Tensor, continuous_embeddings: List[torch.Tensor], image_patch_input_indices: torch.Tensor, ) -> torch.Tensor: """This function places the continuous_embeddings into the word_embeddings at the locations indicated by image_patch_input_indices. Different batch elements can have different numbers of continuous embeddings. Args: word_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Tensor of word embeddings. continuous_embeddings (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`): Tensor of continuous embeddings. The length of the list is the batch size. Each entry is shape [num_image_embeddings, hidden], and num_image_embeddings needs to match the number of non-negative indices in image_patch_input_indices for that batch element. image_patch_input_indices (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Tensor of indices of the image patches in the input_ids tensor. """ if not (word_embeddings.shape[0] == len(continuous_embeddings)): raise ValueError( f"Batch sizes must match! Got {len(continuous_embeddings)=} and {word_embeddings.shape[0]=}" ) output_embeddings = word_embeddings.clone() for batch_idx in range(word_embeddings.shape[0]): # First, find the positions of all the non-negative values in image_patch_input_indices, those are the # positions in word_embeddings that we want to replace with content from continuous_embeddings. dst_indices = torch.nonzero(image_patch_input_indices[batch_idx] >= 0, as_tuple=True)[0] # Next look up those indices in image_patch_input_indices to find the indices in continuous_embeddings that we # want to use to replace the values in word_embeddings. src_indices = image_patch_input_indices[batch_idx][dst_indices] # Check if we have more indices than embeddings. Note that we could have fewer indices if images got truncated. if src_indices.shape[0] > continuous_embeddings[batch_idx].shape[0]: raise ValueError( f"Number of continuous embeddings {continuous_embeddings[batch_idx].shape=} does not match " f"number of continuous token ids {src_indices.shape=} in batch element {batch_idx}." 
                )
            output_embeddings[batch_idx, dst_indices] = continuous_embeddings[batch_idx][src_indices]
        return output_embeddings

    @add_start_docstrings_to_model_forward(FUYU_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        image_patches: torch.Tensor = None,  # [batch_size, num_total_patches, patch_size x patch_size x num_channels]
        image_patches_indices: torch.Tensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Examples:

        ```python
        >>> from transformers import FuyuProcessor, FuyuForCausalLM
        >>> from PIL import Image
        >>> import requests

        >>> processor = FuyuProcessor.from_pretrained("adept/fuyu-8b")
        >>> model = FuyuForCausalLM.from_pretrained("adept/fuyu-8b")

        >>> url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> prompt = "Generate a coco-style caption.\n"

        >>> inputs = processor(text=prompt, images=image, return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> generated_ids = model.generate(**inputs, max_new_tokens=7)
        >>> generation_text = processor.batch_decode(generated_ids[:, -7:], skip_special_tokens=True)
        >>> print(generation_text[0])
        A blue bus parked on the side of a road.
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either input_is or inputs_embeds") seq_length_with_past = seq_length past_key_values_length = 0 if past_key_values is not None: past_key_values_length = past_key_values[0][0].shape[2] seq_length_with_past = seq_length_with_past + past_key_values_length if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0) if inputs_embeds is None: inputs_embeds = self.language_model.get_input_embeddings()(input_ids) if image_patches is not None and past_key_values is None: patch_embeddings = [ self.vision_embed_tokens(patch.to(self.vision_embed_tokens.weight.dtype)) .squeeze(0) .to(inputs_embeds.device) for patch in image_patches ] inputs_embeds = self.gather_continuous_embeddings( word_embeddings=inputs_embeds, continuous_embeddings=patch_embeddings, image_patch_input_indices=image_patches_indices, ) outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, labels=labels, use_cache=use_cache, return_dict=return_dict, ) return outputs def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, image_patches=None, image_patches_indices=None, **kwargs, ): if past_key_values: input_ids = input_ids[:, -1:] position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -1].unsqueeze(-1) # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} if image_patches_indices is not None: model_inputs["image_patches_indices"] = image_patches_indices model_inputs.update( { "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, "image_patches_indices": image_patches_indices if past_key_values is None else None, "image_patches": image_patches if past_key_values is None else None, } ) return model_inputs
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/fuyu/__init__.py
# Copyright 2023 AdeptAI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = { "configuration_fuyu": ["FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP", "FuyuConfig"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["image_processing_fuyu"] = ["FuyuImageProcessor"] _import_structure["processing_fuyu"] = ["FuyuProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_fuyu"] = [ "FuyuForCausalLM", "FuyuPreTrainedModel", ] if TYPE_CHECKING: from .configuration_fuyu import FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP, FuyuConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_fuyu import FuyuImageProcessor from .processing_fuyu import FuyuProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fuyu import ( FuyuForCausalLM, FuyuPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/fuyu/processing_fuyu.py
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Image/Text processor class for Fuyu
"""
import re
from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, TruncationStrategy
from ...utils import TensorType, is_torch_available, logging, requires_backends


if is_torch_available():
    from .image_processing_fuyu import FuyuBatchFeature


logger = logging.get_logger(__name__)


if is_torch_available():
    import torch


TEXT_REPR_BBOX_OPEN = "<box>"
TEXT_REPR_BBOX_CLOSE = "</box>"
TEXT_REPR_POINT_OPEN = "<point>"
TEXT_REPR_POINT_CLOSE = "</point>"

TOKEN_BBOX_OPEN_STRING = "<0x00>"  # <bbox>
TOKEN_BBOX_CLOSE_STRING = "<0x01>"  # </bbox>
TOKEN_POINT_OPEN_STRING = "<0x02>"  # <point>
TOKEN_POINT_CLOSE_STRING = "<0x03>"  # </point>
BEGINNING_OF_ANSWER_STRING = "<0x04>"  # <boa>


def full_unpacked_stream_to_tensor(
    all_bi_tokens_to_place: List[int],
    full_unpacked_stream: List["torch.Tensor"],
    fill_value: int,
    batch_size: int,
    new_seq_len: int,
    offset: int,
) -> "torch.Tensor":
    """Takes an unpacked stream of tokens (i.e. a list of tensors, one for each item in the batch) and does
    the required padding to create a single tensor for the batch of shape batch_size x new_seq_len.
    """

    assert len(all_bi_tokens_to_place) == batch_size
    assert len(full_unpacked_stream) == batch_size

    # Create padded tensors for the full batch.
    new_padded_tensor = torch.full(
        [batch_size, new_seq_len],
        fill_value=fill_value,
        dtype=full_unpacked_stream[0].dtype,
        device=full_unpacked_stream[0].device,
    )

    # Place each batch entry into the batch tensor.
    for bi in range(batch_size):
        tokens_to_place = all_bi_tokens_to_place[bi]
        new_padded_tensor[bi, :tokens_to_place] = full_unpacked_stream[bi][offset : tokens_to_place + offset]

    return new_padded_tensor


def construct_full_unpacked_stream(
    num_real_text_tokens: Union[List[List[int]], "torch.Tensor"],
    input_stream: "torch.Tensor",
    image_tokens: List[List["torch.Tensor"]],
    batch_size: int,
    num_sub_sequences: int,
) -> List["torch.Tensor"]:
    """Takes an input_stream tensor of shape B x S x ?. For each subsequence, adds any required
    padding to account for images and then unpacks the subsequences to create a single sequence per item in the batch.
    Returns a list of tensors, one for each item in the batch."""

    all_bi_stream = []

    for batch_index in range(batch_size):
        all_si_stream = []

        # First, construct full token stream (including image placeholder tokens) and loss mask for each subsequence
        # and append to lists. We use lists rather than tensors because each subsequence is variable-sized.
        # TODO Remove this logic in a subsequent release since subsequences are not supported.
        image_adjustment = image_tokens[batch_index][0]
        subsequence_stream = torch.cat([image_adjustment, input_stream[batch_index, 0]], dim=0)
        num_real_tokens = image_adjustment.shape[0] + num_real_text_tokens[batch_index][0]
        all_si_stream.append(subsequence_stream[:num_real_tokens])

        all_bi_stream.append(torch.cat(all_si_stream, dim=0))

    return all_bi_stream


def _replace_string_repr_with_token_tags(prompt: str) -> str:
    prompt = prompt.replace(TEXT_REPR_POINT_OPEN, TOKEN_POINT_OPEN_STRING)
    prompt = prompt.replace(TEXT_REPR_POINT_CLOSE, TOKEN_POINT_CLOSE_STRING)
    prompt = prompt.replace(TEXT_REPR_BBOX_OPEN, TOKEN_BBOX_OPEN_STRING)
    prompt = prompt.replace(TEXT_REPR_BBOX_CLOSE, TOKEN_BBOX_CLOSE_STRING)
    return prompt


def _segment_prompt_into_text_token_conversions(prompt: str) -> List:
    """
    Given a string prompt, converts the prompt into a list of (text, is_location) pairs.
    """
    # Wherever we notice one of the open/close tag tokens, we split the prompt.
    prompt_text_list: List = []
    regex_pattern = re.compile(
        f"({TOKEN_BBOX_OPEN_STRING}|{TOKEN_BBOX_CLOSE_STRING}|{TOKEN_POINT_OPEN_STRING}|{TOKEN_POINT_CLOSE_STRING})"
    )
    # Split by the regex pattern
    prompt_split = regex_pattern.split(prompt)
    for i, elem in enumerate(prompt_split):
        if len(elem) == 0 or elem in [
            TOKEN_BBOX_OPEN_STRING,
            TOKEN_BBOX_CLOSE_STRING,
            TOKEN_POINT_OPEN_STRING,
            TOKEN_POINT_CLOSE_STRING,
        ]:
            continue
        prompt_text_list.append(
            (elem, i > 1 and prompt_split[i - 1] in [TOKEN_BBOX_OPEN_STRING, TOKEN_POINT_OPEN_STRING])
        )
    return prompt_text_list


def _transform_coordinates_and_tokenize(prompt: str, scale_factor: float, tokenizer) -> List[int]:
    """
    This function transforms the prompt in the following fashion:
    - <box> <point> and </box> </point> to their respective token mappings
    - extract the coordinates from the tag
    - transform the coordinates into the transformed image space
    - return the prompt tokens with the transformed coordinates and new tags

    Bounding boxes and points MUST be in the following format: <box>y1, x1, y2, x2</box> <point>x, y</point> The spaces
    and punctuation added above are NOT optional.
    """
    # We want to do the following: tokenize the text normally -> when we see a point or box tag, tokenize its content
    # with _transform_within_tags -> after the close tag, continue tokenizing normally.
    # First, we replace the point and box tags with their respective tokens
    prompt = _replace_string_repr_with_token_tags(prompt)
    # Tokenize the prompt
    # Convert prompt into a list split
    prompt_text_list = _segment_prompt_into_text_token_conversions(prompt)
    transformed_prompt_tokens: List[int] = []
    for elem in prompt_text_list:
        if elem[1]:
            # This is a location, we need to tokenize it
            within_tag_tokenized = _transform_within_tags(elem[0], scale_factor, tokenizer)
            # Surround the text with the open and close tags
            transformed_prompt_tokens.extend(within_tag_tokenized)
        else:
            transformed_prompt_tokens.extend(tokenizer(elem[0], add_special_tokens=False).input_ids)
    return transformed_prompt_tokens


def _transform_within_tags(text: str, scale_factor: float, tokenizer) -> List[int]:
    """
    Given a bounding box of the fashion <box>1, 2, 3, 4</box> | <point>1, 2</point> This function is responsible for
    converting 1, 2, 3, 4 into tokens of 1 2 3 4 without any commas.
    """
    # Convert the text into a list of strings.
    num_int_strs = text.split(",")
    if len(num_int_strs) == 2:
        # Two numbers means a point: use the point open/close tokens.
        token_space_open_string = tokenizer.vocab[TOKEN_POINT_OPEN_STRING]
        token_space_close_string = tokenizer.vocab[TOKEN_POINT_CLOSE_STRING]
    else:
        token_space_open_string = tokenizer.vocab[TOKEN_BBOX_OPEN_STRING]
        token_space_close_string = tokenizer.vocab[TOKEN_BBOX_CLOSE_STRING]

    # Strip whitespace and convert the coordinate strings to floats.
    num_ints = [float(num.strip()) for num in num_int_strs]
    # Scale to the transformed image size.
    if len(num_ints) == 2:
        num_ints_translated = scale_point_to_transformed_image(x=num_ints[0], y=num_ints[1], scale_factor=scale_factor)
    elif len(num_ints) == 4:
        num_ints_translated = scale_bbox_to_transformed_image(
            top=num_ints[0],
            left=num_ints[1],
            bottom=num_ints[2],
            right=num_ints[3],
            scale_factor=scale_factor,
        )
    else:
        raise ValueError(f"Invalid number of ints: {len(num_ints)}")
    # Look up the token for each transformed coordinate directly in the vocabulary.
    tokens = [tokenizer.vocab[str(num)] for num in num_ints_translated]
    return [token_space_open_string] + tokens + [token_space_close_string]


def _tokenize_prompts_with_image_and_batch(
    tokenizer,
    prompts: List[List[str]],
    scale_factors: Optional[List[List["torch.Tensor"]]],
    max_tokens_to_generate: int,
    max_position_embeddings: int,
    add_BOS: bool,  # Same issue with types as above
    add_beginning_of_answer_token: bool,
) -> Tuple["torch.Tensor", "torch.Tensor"]:
    """
    Given a set of prompts and a number of tokens to generate:
    - tokenize the prompts
    - set the sequence length to be the max prompt length plus the number of tokens we would like to generate
    - pad all the sequences to this length so we can convert them into a 3D tensor.
    """

    # If not tool use, transform the coordinates while tokenizing
    if scale_factors is not None:
        transformed_prompt_tokens = []
        for prompt_seq, scale_factor_seq in zip(prompts, scale_factors):
            transformed_prompt_tokens.append(
                [
                    _transform_coordinates_and_tokenize(prompt, scale_factor.item(), tokenizer)
                    for prompt, scale_factor in zip(prompt_seq, scale_factor_seq)
                ]
            )
    else:
        transformed_prompt_tokens = [[tokenizer.tokenize(prompt) for prompt in prompt_seq] for prompt_seq in prompts]

    prompts_tokens = transformed_prompt_tokens

    if add_BOS:
        bos_token = tokenizer.vocab["<s>"]
    else:
        bos_token = tokenizer.vocab["|ENDOFTEXT|"]
    prompts_tokens = [[[bos_token] + x for x in prompt_seq] for prompt_seq in prompts_tokens]
    if add_beginning_of_answer_token:
        boa = tokenizer.vocab[BEGINNING_OF_ANSWER_STRING]
        # Only add the beginning-of-answer token to the last subsequence since that is what will be completed
        for token_seq in prompts_tokens:
            token_seq[-1].append(boa)

    # Now we have a list of lists of tokens, where each inner list can have a different
    # size. We want to extend this list to:
    #   - incorporate the tokens that need to be generated
    #   - make all the sequences equal length.
    # Get the prompts length.
    prompts_length = [[len(x) for x in prompts_tokens_seq] for prompts_tokens_seq in prompts_tokens]
    # Get the max prompts length.
    max_prompt_len: int = np.max(prompts_length)
    # Number of tokens in each sample of the batch.
    samples_length = min(max_prompt_len + max_tokens_to_generate, max_position_embeddings)
    if max_prompt_len + max_tokens_to_generate > max_position_embeddings:
        logger.warning(
            f"Max subsequence prompt length of {max_prompt_len} + max tokens to generate {max_tokens_to_generate} "
            f"exceeds context length of {max_position_embeddings}. Will generate as many tokens as possible."
        )
    # Now update the lists to all be of the same size: samples_length.
for prompt_tokens_seq, prompts_length_seq in zip(prompts_tokens, prompts_length): for prompt_tokens, prompt_length in zip(prompt_tokens_seq, prompts_length_seq): if len(prompt_tokens) > samples_length: raise ValueError("Length of subsequence prompt exceeds sequence length.") padding_size = samples_length - prompt_length prompt_tokens.extend([tokenizer.vocab["|ENDOFTEXT|"]] * padding_size) # Now we are in a structured format, we can convert to tensors. prompts_tokens_tensor = torch.tensor(prompts_tokens, dtype=torch.int64) prompts_length_tensor = torch.tensor(prompts_length, dtype=torch.int64) return prompts_tokens_tensor, prompts_length_tensor # Simplified assuming self.crop_top = self.padding_top = 0 def original_to_transformed_h_coords(original_coords, scale_h): return np.round(original_coords * scale_h).astype(np.int32) # Simplified assuming self.crop_left = self.padding_left = 0 def original_to_transformed_w_coords(original_coords, scale_w): return np.round(original_coords * scale_w).astype(np.int32) def scale_point_to_transformed_image(x: float, y: float, scale_factor: float) -> List[int]: x_scaled = original_to_transformed_w_coords(np.array([x / 2]), scale_factor)[0] y_scaled = original_to_transformed_h_coords(np.array([y / 2]), scale_factor)[0] return [x_scaled, y_scaled] def scale_bbox_to_transformed_image( top: float, left: float, bottom: float, right: float, scale_factor: float ) -> List[int]: top_scaled = original_to_transformed_w_coords(np.array([top / 2]), scale_factor)[0] left_scaled = original_to_transformed_h_coords(np.array([left / 2]), scale_factor)[0] bottom_scaled = original_to_transformed_w_coords(np.array([bottom / 2]), scale_factor)[0] right_scaled = original_to_transformed_h_coords(np.array([right / 2]), scale_factor)[0] return [top_scaled, left_scaled, bottom_scaled, right_scaled] class FuyuProcessor(ProcessorMixin): r""" Constructs a Fuyu processor which wraps a Fuyu image processor and a Llama tokenizer into a single processor. [`FuyuProcessor`] offers all the functionalities of [`FuyuImageProcessor`] and [`LlamaTokenizerFast`]. See the [`~FuyuProcessor.__call__`] and [`~FuyuProcessor.decode`] for more information. Args: image_processor ([`FuyuImageProcessor`]): The image processor is a required input. tokenizer ([`LlamaTokenizerFast`]): The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "FuyuImageProcessor" tokenizer_class = "AutoTokenizer" def __init__(self, image_processor, tokenizer): super().__init__(image_processor=image_processor, tokenizer=tokenizer) self.image_processor = image_processor self.tokenizer = tokenizer self.max_tokens_to_generate = 10 self.max_position_embeddings = 16384 # TODO Can't derive this from model files: where to set it? 
self.pad_token_id = 0 self.dummy_image_index = -1 def _left_pad_inputs_with_attention_mask(self, model_inputs: List[Dict], return_attention_mask: bool): max_length_input_ids = max(entry["input_ids"].shape[1] for entry in model_inputs) max_length_image_patch_indices = max(entry["image_patches_indices"].shape[1] for entry in model_inputs) batched_inputs = {"input_ids": [], "image_patches": [], "image_patches_indices": [], "attention_mask": []} for entry in model_inputs: for key, tensor in entry.items(): if key == "input_ids": num_padding_tokens = max_length_input_ids - tensor.shape[1] padded_input_ids = torch.cat( [ torch.full((tensor.shape[0], num_padding_tokens), self.pad_token_id, dtype=torch.long), tensor, ], dim=1, ) batched_inputs[key].append(padded_input_ids) attention_mask = torch.cat( [torch.zeros(tensor.shape[0], num_padding_tokens, dtype=torch.long), torch.ones_like(tensor)], dim=1, ) batched_inputs["attention_mask"].append(attention_mask) elif key == "image_patches": # For image_patches, we don't pad but just append them to the list. batched_inputs[key].append(tensor) else: # for image_patches_indices num_padding_indices = max_length_image_patch_indices - tensor.shape[1] padded_indices = torch.cat( [ torch.full( (tensor.shape[0], num_padding_indices), self.dummy_image_index, dtype=torch.long ), tensor, ], dim=1, ) batched_inputs[key].append(padded_indices) batched_keys = ["input_ids", "image_patches_indices"] if return_attention_mask: batched_keys.append("attention_mask") for key in batched_keys: batched_inputs[key] = torch.cat(batched_inputs[key], dim=0) return batched_inputs def get_sample_encoding( self, prompts, scale_factors, image_unpadded_heights, image_unpadded_widths, image_placeholder_id, image_newline_id, tensor_batch_images, ): image_present = torch.ones(1, 1, 1) model_image_input = self.image_processor.preprocess_with_tokenizer_info( image_input=tensor_batch_images, image_present=image_present, image_unpadded_h=image_unpadded_heights, image_unpadded_w=image_unpadded_widths, image_placeholder_id=image_placeholder_id, image_newline_id=image_newline_id, variable_sized=True, ) # FIXME max_tokens_to_generate is embedded into this processor's call. prompt_tokens, prompts_length = _tokenize_prompts_with_image_and_batch( tokenizer=self.tokenizer, prompts=prompts, scale_factors=scale_factors, max_tokens_to_generate=self.max_tokens_to_generate, max_position_embeddings=self.max_position_embeddings, add_BOS=True, add_beginning_of_answer_token=True, ) image_padded_unpacked_tokens = construct_full_unpacked_stream( num_real_text_tokens=prompts_length, input_stream=prompt_tokens, image_tokens=model_image_input["image_input_ids"], batch_size=1, num_sub_sequences=self.subsequence_length, ) # Construct inputs for image patch indices. unpacked_image_patch_indices_per_batch = construct_full_unpacked_stream( num_real_text_tokens=prompts_length, input_stream=torch.full_like(prompt_tokens, -1), image_tokens=model_image_input["image_patch_indices_per_batch"], batch_size=1, num_sub_sequences=self.subsequence_length, ) max_prompt_length = max(x.shape[-1] for x in image_padded_unpacked_tokens) max_seq_len_batch = min(max_prompt_length + self.max_tokens_to_generate, self.max_position_embeddings) tokens_to_place = min(max_seq_len_batch, max(0, image_padded_unpacked_tokens[0].shape[0])) # Use same packing logic for the image patch indices. 
        image_patch_input_indices = full_unpacked_stream_to_tensor(
            all_bi_tokens_to_place=[tokens_to_place],
            full_unpacked_stream=unpacked_image_patch_indices_per_batch,
            fill_value=-1,
            batch_size=1,
            new_seq_len=max_seq_len_batch,
            offset=0,
        )
        image_patches_tensor = torch.stack([img[0] for img in model_image_input["image_patches"]])
        batch_encoding = {
            "input_ids": image_padded_unpacked_tokens[0].unsqueeze(0),
            "image_patches": image_patches_tensor,
            "image_patches_indices": image_patch_input_indices,
        }
        return batch_encoding

    def __call__(
        self,
        text=None,
        images=None,
        add_special_tokens: bool = True,
        return_attention_mask: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> "FuyuBatchFeature":
        """
        Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to
        encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        FuyuImageProcessor's [`~FuyuImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
        of the above two methods for more information.

        Args:
            text (`str`, `List[str]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            images (`PIL.Image.Image`, `List[PIL.Image.Image]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.

        Returns:
            [`FuyuBatchFeature`]: A [`FuyuBatchFeature`] with the following fields:

            - **input_ids** -- Tensor of token ids to be fed to a model. Returned when `text` is not `None`.
            - **image_patches** -- List of Tensor of image patches. Returned when `images` is not `None`.
            - **image_patches_indices** -- Tensor of indices where patch embeddings have to be inserted by the model.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model when
              `return_attention_mask=True`.
        """
        requires_backends(self, ["torch"])

        # --- Check input validity ---
        if not return_attention_mask:
            raise ValueError("`return_attention_mask=False` is not supported for this model.")
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be None.")
        if text is not None and images is None:
            logger.warning("You are processing a text with no associated image. Make sure it is intended.")
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        if text is None and images is not None:
            logger.warning("You are processing an image with no associated text. Make sure it is intended.")
            prompts = [[""]]
        if text is not None and images is not None:
            if isinstance(text, str):
                prompts = [[text]]
            elif isinstance(text, list):
                prompts = [[text_seq] for text_seq in text]

        # --- Preprocess images using self.image_processor ---
        # FIXME - We hard code "pt" here because the rest of the processing assumes torch tensors
        image_encoding = self.image_processor.preprocess(images, return_tensors="pt")
        batch_images = image_encoding["images"]
        image_unpadded_heights = image_encoding["image_unpadded_heights"]
        image_unpadded_widths = image_encoding["image_unpadded_widths"]
        scale_factors = image_encoding["image_scale_factors"]
        self.subsequence_length = 1  # Each batch contains only one sequence.
        self.batch_size = len(batch_images)

        # --- Use self.tokenizer to get the ids of special tokens to insert into image ids ---
        image_placeholder_id = self.tokenizer("|SPEAKER|", add_special_tokens=False)["input_ids"][1]
        image_newline_id = self.tokenizer("|NEWLINE|", add_special_tokens=False)["input_ids"][1]
        tensor_batch_images = torch.stack([img[0] for img in batch_images]).unsqueeze(1)

        # --- Use self.image_processor again to obtain the full token ids and batch inputs ---
        all_encodings = []

        for prompt, scale_factor, image_unpadded_height, image_unpadded_width, tensor_batch_image in zip(
            prompts, scale_factors, image_unpadded_heights, image_unpadded_widths, tensor_batch_images
        ):
            sample_encoding = self.get_sample_encoding(
                prompts=[prompt],
                scale_factors=[scale_factor],
                image_unpadded_heights=torch.tensor([image_unpadded_height]),
                image_unpadded_widths=torch.tensor([image_unpadded_width]),
                image_placeholder_id=image_placeholder_id,
                image_newline_id=image_newline_id,
                tensor_batch_images=tensor_batch_image.unsqueeze(0),
            )
            all_encodings.append(sample_encoding)
        batch_encoding = self._left_pad_inputs_with_attention_mask(
            model_inputs=all_encodings, return_attention_mask=return_attention_mask
        )
        return FuyuBatchFeature(data=batch_encoding)

    def post_process_box_coordinates(self, outputs, target_sizes=None):
        """
        Transforms raw coordinates detected by [`FuyuForCausalLM`] to the original images' coordinate space.
        Coordinates will be returned in "box" format, with the following pattern:
            `<box>top, left, bottom, right</box>`

        Point coordinates are not supported yet.

        Args:
            outputs ([`GenerateOutput`]):
                Raw outputs from `generate`.
            target_sizes (`torch.Tensor`, *optional*):
                Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in
                the batch. If set, found coordinates in the output sequence are rescaled to the target sizes. If left
                to None, coordinates will not be rescaled.

        Returns:
            `GenerateOutput`: Same output type returned by `generate`, with output token ids replaced with boxed and
            possibly rescaled coordinates.
""" def scale_factor_to_fit(original_size, target_size=None): height, width = original_size if target_size is None: max_height = self.image_processor.size["height"] max_width = self.image_processor.size["width"] else: max_height, max_width = target_size if width <= max_width and height <= max_height: return 1.0 return min(max_height / height, max_width / width) def find_delimiters_pair(tokens, start_token, end_token): start_id = self.tokenizer.convert_tokens_to_ids(start_token) end_id = self.tokenizer.convert_tokens_to_ids(end_token) starting_positions = (tokens == start_id).nonzero(as_tuple=True)[0] ending_positions = (tokens == end_id).nonzero(as_tuple=True)[0] if torch.any(starting_positions) and torch.any(ending_positions): return (starting_positions[0], ending_positions[0]) return (None, None) def tokens_to_boxes(tokens, original_size): while (pair := find_delimiters_pair(tokens, TOKEN_BBOX_OPEN_STRING, TOKEN_BBOX_CLOSE_STRING)) != ( None, None, ): start, end = pair if end != start + 5: continue # Retrieve transformed coordinates from tokens coords = self.tokenizer.convert_ids_to_tokens(tokens[start + 1 : end]) # Scale back to original image size and multiply by 2 scale = scale_factor_to_fit(original_size) top, left, bottom, right = [2 * int(float(c) / scale) for c in coords] # Replace the IDs so they get detokenized right replacement = f" {TEXT_REPR_BBOX_OPEN}{top}, {left}, {bottom}, {right}{TEXT_REPR_BBOX_CLOSE}" replacement = self.tokenizer.tokenize(replacement)[1:] replacement = self.tokenizer.convert_tokens_to_ids(replacement) replacement = torch.tensor(replacement).to(tokens) tokens = torch.cat([tokens[:start], replacement, tokens[end + 1 :]], 0) return tokens def tokens_to_points(tokens, original_size): while (pair := find_delimiters_pair(tokens, TOKEN_POINT_OPEN_STRING, TOKEN_POINT_CLOSE_STRING)) != ( None, None, ): start, end = pair if end != start + 3: continue # Retrieve transformed coordinates from tokens coords = self.tokenizer.convert_ids_to_tokens(tokens[start + 1 : end]) # Scale back to original image size and multiply by 2 scale = scale_factor_to_fit(original_size) x, y = [2 * int(float(c) / scale) for c in coords] # Replace the IDs so they get detokenized right replacement = f" {TEXT_REPR_POINT_OPEN}{x}, {y}{TEXT_REPR_POINT_CLOSE}" replacement = self.tokenizer.tokenize(replacement)[1:] replacement = self.tokenizer.convert_tokens_to_ids(replacement) replacement = torch.tensor(replacement).to(tokens) tokens = torch.cat([tokens[:start], replacement, tokens[end + 1 :]], 0) return tokens if target_sizes is None: target_sizes = ((self.image_processor.size["height"], self.image_processor.size["width"]),) * len(outputs) elif target_sizes.shape[1] != 2: raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") if len(outputs) != len(target_sizes): raise ValueError("Make sure that you pass in as many target sizes as output sequences") results = [] for seq, size in zip(outputs, target_sizes): seq = tokens_to_boxes(seq, size) seq = tokens_to_points(seq, size) results.append(seq) return results def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. 
Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs)
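

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the library): a minimal end-to-end
# example of how this processor is typically driven, assuming the public
# "adept/fuyu-8b" checkpoint and network access. The prompt text and the
# generation length are placeholders.
if __name__ == "__main__":
    import requests
    from PIL import Image

    from transformers import FuyuForCausalLM, FuyuProcessor

    processor = FuyuProcessor.from_pretrained("adept/fuyu-8b")
    model = FuyuForCausalLM.from_pretrained("adept/fuyu-8b")

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    # __call__ tokenizes the prompt, patchifies the image, interleaves the
    # image-patch placeholder ids with the text ids, and left-pads the batch.
    inputs = processor(text="Generate a coco-style caption.\n", images=image, return_tensors="pt")

    generated_ids = model.generate(**inputs, max_new_tokens=10)
    # batch_decode forwards to the underlying Llama tokenizer.
    print(processor.batch_decode(generated_ids[:, -10:], skip_special_tokens=True))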
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/fuyu/image_processing_fuyu.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Fuyu.""" import math from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import ( pad, resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, is_valid_image, make_list_of_images, to_numpy_array, validate_preprocess_arguments, ) from ...utils import ( TensorType, is_torch_available, is_torch_device, is_torch_dtype, logging, requires_backends, ) if is_torch_available(): import torch logger = logging.get_logger(__name__) def make_list_of_list_of_images( images: Union[List[List[ImageInput]], List[ImageInput], ImageInput], ) -> List[List[ImageInput]]: if is_valid_image(images): return [[images]] if isinstance(images, list) and all(isinstance(image, list) for image in images): return images if isinstance(images, list): return [make_list_of_images(image) for image in images] raise ValueError("images must be a list of list of images or a list of images or an image.") class FuyuBatchFeature(BatchFeature): """ BatchFeature class for Fuyu image processor and processor. The outputs dictionary from the processors contains a mix of tensors and lists of tensors. """ def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]] = None): """ Convert the inner content to tensors. Args: tensor_type (`str` or [`~utils.TensorType`], *optional*): The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If `None`, no modification is done. """ if tensor_type is None: return self is_tensor, as_tensor = self._get_is_as_tensor_fns(tensor_type=tensor_type) def _convert_tensor(elem): if is_tensor(elem): return elem return as_tensor(elem) def _safe_convert_tensor(elem): try: return _convert_tensor(elem) except: # noqa E722 if key == "overflowing_values": raise ValueError("Unable to create tensor returning overflowing values of different lengths. ") raise ValueError( "Unable to create tensor, you should probably activate padding " "with 'padding=True' to have batched tensors with the same length." ) # Do the tensor conversion in batch for key, value in self.items(): if isinstance(value, list) and isinstance(value[0], list): # List[List[Any]] -> List[List[Tensor]] self[key] = [[_safe_convert_tensor(elem) for elem in elems] for elems in value] elif isinstance(value, list): # List[Any] -> List[Tensor] self[key] = [_safe_convert_tensor(elem) for elem in value] else: # Any -> Tensor self[key] = _safe_convert_tensor(value) return self def to(self, *args, **kwargs) -> "BatchFeature": """ Send all values to device by calling `v.to(*args, **kwargs)` (PyTorch only). This should support casting in different `dtypes` and sending the `BatchFeature` to a different `device`. 
Args: args (`Tuple`): Will be passed to the `to(...)` function of the tensors. kwargs (`Dict`, *optional*): Will be passed to the `to(...)` function of the tensors. Returns: [`BatchFeature`]: The same instance after modification. """ requires_backends(self, ["torch"]) import torch # noqa new_data = {} device = kwargs.get("device") # Check if the args are a device or a dtype if device is None and len(args) > 0: # device should be always the first argument arg = args[0] if is_torch_dtype(arg): # The first argument is a dtype pass elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int): device = arg else: # it's something else raise ValueError(f"Attempting to cast a BatchFeature to type {str(arg)}. This is not supported.") def _to(elem): # check if v is a floating point if torch.is_floating_point(elem): # cast and send to device return elem.to(*args, **kwargs) if device is not None: return elem.to(device=device) return elem # We cast only floating point tensors to avoid issues with tokenizers casting `LongTensor` to `FloatTensor` for k, v in self.items(): if isinstance(v, list) and isinstance(v[0], list): # Data structure is a list of lists new_v = [] for elems in v: new_v.append([_to(elem) for elem in elems]) new_data[k] = new_v elif isinstance(v, list): # Data structure is a list new_data[k] = [_to(elem) for elem in v] else: new_data[k] = _to(v) self.data = new_data return self class FuyuImageProcessor(BaseImageProcessor): """ This class should handle the image processing part before the main FuyuForCausalLM. In particular, it should handle: - Processing Images: Taking a batch of images as input. If the images are variable-sized, it resizes them based on the desired patch dimensions. The image output is always img_h, img_w of (1080, 1920) Then, it patches up these images using the patchify_image function. - Creating Image Input IDs: For each patch, a placeholder ID is given to identify where these patches belong in a token sequence. For variable-sized images, each line of patches is terminated with a newline ID. - Image Patch Indices: For each image patch, the code maintains an index where these patches should be inserted in a token stream. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image to `size`. size (`Dict[str, int]`, *optional*, defaults to `{"height": 1080, "width": 1920}`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the image to `size`. padding_value (`float`, *optional*, defaults to 1.0): The value to pad the image with. padding_mode (`str`, *optional*, defaults to `"constant"`): The padding mode to use when padding the image. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. image_mean (`float`, *optional*, defaults to 0.5): The mean to use when normalizing the image. image_std (`float`, *optional*, defaults to 0.5): The standard deviation to use when normalizing the image. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `1 / 255`): The factor to use when rescaling the image. 
patch_size (`Dict[str, int]`, *optional*, defaults to `{"height": 30, "width": 30}`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches. """ model_input_names = [ "images", "image_input_ids", "image_patches", "image_patch_indices_per_batch", "image_patch_indices_per_subsequence", ] def __init__( self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_pad: bool = True, padding_value: float = 1.0, padding_mode: str = "constant", do_normalize: bool = True, image_mean: Union[float, List[float]] = 0.5, image_std: Union[float, List[float]] = 0.5, do_rescale: bool = True, rescale_factor: float = 1 / 255, patch_size: Optional[Dict[str, int]] = None, **kwargs, ): super().__init__(**kwargs) self.do_resize = do_resize self.size = size if size is not None else {"height": 1080, "width": 1920} self.resample = resample self.do_pad = do_pad self.padding_value = padding_value self.padding_mode = padding_mode self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.patch_size = patch_size if patch_size is not None else {"height": 30, "width": 30} self._valid_processor_keys = [ "images", "do_resize", "size", "resample", "do_pad", "padding_value", "padding_mode", "do_normalize", "image_mean", "image_std", "do_rescale", "rescale_factor", "patch_size", "return_tensors", "data_format", "input_data_format", ] def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized image. 
""" image_height, image_width = get_image_size(image, input_data_format) target_height, target_width = size["height"], size["width"] if image_width <= target_width and image_height <= target_height: return image height_scale_factor = target_height / image_height width_scale_factor = target_width / image_width optimal_scale_factor = min(height_scale_factor, width_scale_factor) new_height = int(image_height * optimal_scale_factor) new_width = int(image_width * optimal_scale_factor) scaled_image = resize( image=image, size=(new_height, new_width), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) return scaled_image def pad_image( self, image: np.ndarray, size: Dict[str, int], mode: str = "constant", constant_values: float = 1.0, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Pad an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to pad. size (`Dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. data_format (`ChannelDimension` or `str`, *optional*): The data format of the output image. If unset, the same format as the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ image_height, image_width = get_image_size(image, input_data_format) target_height, target_width = size["height"], size["width"] padding_top = 0 padding_left = 0 padding_bottom = target_height - image_height padding_right = target_width - image_width padded_image = pad( image, padding=((padding_top, padding_bottom), (padding_left, padding_right)), mode=mode, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, ) return padded_image def preprocess( self, images, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: Optional[PILImageResampling] = None, do_pad: Optional[bool] = None, padding_value: Optional[float] = None, padding_mode: Optional[str] = None, do_normalize: Optional[bool] = None, image_mean: Optional[float] = None, image_std: Optional[float] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, patch_size: Optional[Dict[str, int]] = None, data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, return_tensors: Optional[TensorType] = None, ): """ Utility function to preprocess the images and extract necessary information about original formats. Args: images (`ImageInput`): Images to preprocess. Expects a single image, a list or images or a list of lists of images. Pixel values range from 0 to 255, or between 0 and 1 if `do_rescale` is `False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image to `size`. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the image to `size`. padding_value (`float`, *optional*, defaults to `self.padding_value`): The value to pad the image with. 
padding_mode (`str`, *optional*, defaults to `self.padding_mode`): The padding mode to use when padding the image. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float`, *optional*, defaults to `self.image_mean`): The mean to use when normalizing the image. image_std (`float`, *optional*, defaults to `self.image_std`): The standard deviation to use when normalizing the image. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): The factor to use when rescaling the image. patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format of the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. """ do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size resample = resample if resample is not None else self.resample do_pad = do_pad if do_pad is not None else self.do_pad do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std padding_value = padding_value if padding_value is not None else self.padding_value padding_mode = padding_mode if padding_mode is not None else self.padding_mode do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor patch_size = patch_size if patch_size is not None else self.patch_size if isinstance(images, list) and any(isinstance(elem, list) and len(elem) >= 2 for elem in images): raise ValueError("Multiple images for a single sample are not yet supported.") batch_images = make_list_of_list_of_images(images) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_pad=do_pad, size_divisibility=size, # There is no pad divisibility in this processor, but pad requires the size arg. 
do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays. batch_images = [[to_numpy_array(image) for image in images] for images in batch_images] if is_scaled_image(batch_images[0][0]) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(batch_images[0][0]) original_image_sizes = [get_image_size(images[0], channel_dim=input_data_format) for images in batch_images] if do_resize: batch_images = [ [self.resize(image, size=size, input_data_format=input_data_format) for image in images] for images in batch_images ] image_sizes = [get_image_size(images[0], channel_dim=input_data_format) for images in batch_images] image_unpadded_heights = [[image_size[0]] for image_size in image_sizes] image_unpadded_widths = [[image_size[1]] for image_size in image_sizes] # scale_h is the same as scale_w image_scale_factors = [ [resized_size[0] / original_size[0]] for original_size, resized_size in zip(original_image_sizes, image_sizes) ] if do_pad: batch_images = [ [ self.pad_image( image, size=size, mode=padding_mode, constant_values=padding_value, input_data_format=input_data_format, ) for image in images ] for images in batch_images ] if do_rescale: batch_images = [ [self.rescale(image, scale=rescale_factor, input_data_format=input_data_format) for image in images] for images in batch_images ] if do_normalize: batch_images = [ [ self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] for images in batch_images ] if data_format is not None: batch_images = [ [to_channel_dimension_format(image, data_format, input_data_format) for image in images] for images in batch_images ] data = { "images": batch_images, "image_unpadded_heights": image_unpadded_heights, "image_unpadded_widths": image_unpadded_widths, "image_scale_factors": image_scale_factors, } return FuyuBatchFeature(data=data, tensor_type=return_tensors) def get_num_patches(self, image_height: int, image_width: int, patch_size: Dict[str, int] = None) -> int: """ Calculate number of patches required to encode an image. Args: image_height (`int`): Height of the image. image_width (`int`): Width of the image. patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches. """ patch_size = patch_size if patch_size is not None else self.patch_size patch_height, patch_width = self.patch_size["height"], self.patch_size["width"] if image_height % patch_height != 0: raise ValueError(f"{image_height=} must be divisible by {patch_height}") if image_width % patch_width != 0: raise ValueError(f"{image_width=} must be divisible by {patch_width}") num_patches_per_dim_h = image_height // patch_height num_patches_per_dim_w = image_width // patch_width num_patches = num_patches_per_dim_h * num_patches_per_dim_w return num_patches def patchify_image(self, image: "torch.Tensor", patch_size: Optional[Dict[str, int]] = None) -> "torch.Tensor": """ Convert an image into a tensor of patches. Args: image (`torch.Tensor`): Image to convert. 
Shape: [batch, channels, height, width] patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches. """ requires_backends(self, ["torch"]) patch_size = patch_size if patch_size is not None else self.patch_size patch_height, patch_width = patch_size["height"], patch_size["width"] # TODO refer to https://github.com/ArthurZucker/transformers/blob/0f0a3fe5ca5697ee58faeb5b53f049af720b5e98/src/transformers/models/vit_mae/modeling_vit_mae.py#L871 # torch implementation is faster but does not handle non-squares batch_size, channels, _, _ = image.shape unfolded_along_height = image.unfold(2, patch_height, patch_height) patches = unfolded_along_height.unfold(3, patch_width, patch_width) patches = patches.contiguous() patches = patches.view(batch_size, channels, -1, patch_height, patch_width) patches = patches.permute(0, 2, 3, 4, 1) patches = patches.reshape(batch_size, -1, channels * patch_height * patch_width) return patches def preprocess_with_tokenizer_info( self, image_input: "torch.Tensor", image_present: "torch.Tensor", image_unpadded_h: "torch.Tensor", image_unpadded_w: "torch.Tensor", image_placeholder_id: int, image_newline_id: int, variable_sized: bool, patch_size: Optional[Dict[str, int]] = None, ) -> FuyuBatchFeature: """Process images for model input. In particular, variable-sized images are handled here. Args: image_input (`torch.Tensor` of shape [batch_size, subsequence_size, num_channels, height, width]): Tensor of images padded to model input size. image_present (`torch.Tensor` of shape [batch_size, subsequence_size, num_images]): Tensor of 1s and 0s indicating whether an image is present. image_unpadded_h (`torch.Tensor` of shape [batch_size, subsequence_size]): Tensor of unpadded image heights. image_unpadded_w (`torch.Tensor` of shape [batch_size, subsequence_size]): Tensor of unpadded image widths. image_placeholder_id (int): The id of the image placeholder token. Comes from an associated tokenizer. image_newline_id (int): The id of the image newline token. Comes from an associated tokenizer. variable_sized (bool): Whether to process images as variable-sized. patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`): Size of the patches. """ requires_backends(self, ["torch"]) patch_size = patch_size if patch_size is not None else self.patch_size patch_height, patch_width = patch_size["height"], patch_size["width"] # Only images that are present. images: List[List[torch.Tensor]] = [] batch_image_patches: List[List[torch.Tensor]] = [] # Image input ids for every subsequence, including ones with no image present. 
batch_image_input_ids: List[List[torch.Tensor]] = [] for batch_index in range(image_input.shape[0]): image_input_ids = [] image_patches = [] for subseq_index in range(image_input.shape[1]): if image_present[batch_index, subseq_index]: image = image_input[batch_index, subseq_index] image_height, image_width = image.shape[1], image.shape[2] if variable_sized: # The min() is required here due to floating point issues: # math.ceil(torch.tensor(300).cuda() / 30) == 11 new_h = min( image_height, math.ceil(image_unpadded_h[batch_index, subseq_index] / patch_height) * patch_height, ) new_w = min( image_width, math.ceil(image_unpadded_w[batch_index, subseq_index] / patch_width) * patch_width, ) image = image[:, :new_h, :new_w] image_height, image_width = new_h, new_w num_patches = self.get_num_patches(image_height=image_height, image_width=image_width) tensor_of_image_ids = torch.full( [num_patches], image_placeholder_id, dtype=torch.int32, device=image_input.device ) patches = self.patchify_image(image=image.unsqueeze(0)).squeeze(0) assert num_patches == patches.shape[0] if variable_sized: # Now terminate each line with |NEWLINE|. tensor_of_image_ids = tensor_of_image_ids.reshape(-1, image_width // patch_width) newline_ids = torch.full( [tensor_of_image_ids.shape[0], 1], image_newline_id, dtype=torch.int32, device=image_input.device, ) tensor_of_image_ids = torch.cat([tensor_of_image_ids, newline_ids], dim=1) tensor_of_image_ids = tensor_of_image_ids.reshape(-1) images.append([image]) image_input_ids.append(tensor_of_image_ids) image_patches.append(patches) else: image_input_ids.append(torch.tensor([], dtype=torch.int32, device=image_input.device)) batch_image_input_ids.append(image_input_ids) batch_image_patches.append(image_patches) # Create image_patch_input_indices, where non-negative values correspond to image patches to be inserted in # the stream. image_patch_indices_per_batch: List[List[torch.Tensor]] = [] image_patch_indices_per_subsequence: List[List[torch.Tensor]] = [] for sample_image_input_ids in batch_image_input_ids: index_offset = 0 per_batch_indices = [] per_subsequence_indices = [] for subseq_image_input_ids in sample_image_input_ids: # Indices of image patches. patches_mask = subseq_image_input_ids == image_placeholder_id num_patches = torch.count_nonzero(patches_mask) indices = torch.arange(num_patches, dtype=torch.int64, device=subseq_image_input_ids.device).type_as( subseq_image_input_ids ) # Place those indices in the image input ids token stream, with -1 representing non-index tokens. indices_in_stream_per_batch = torch.full_like(subseq_image_input_ids, -1) indices_in_stream_per_subsequence = torch.full_like(subseq_image_input_ids, -1) patches_inds = torch.nonzero(patches_mask, as_tuple=True)[0] indices_in_stream_per_batch[patches_inds] = indices + index_offset indices_in_stream_per_subsequence[patches_inds] = indices per_batch_indices.append(indices_in_stream_per_batch) per_subsequence_indices.append(indices_in_stream_per_subsequence) index_offset += num_patches image_patch_indices_per_batch.append(per_batch_indices) image_patch_indices_per_subsequence.append(per_subsequence_indices) return FuyuBatchFeature( data={ "images": images, "image_input_ids": batch_image_input_ids, "image_patches": batch_image_patches, "image_patch_indices_per_batch": image_patch_indices_per_batch, "image_patch_indices_per_subsequence": image_patch_indices_per_subsequence, } )
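

# ---------------------------------------------------------------------------
# Sanity-check sketch (illustrative, not part of the library): the patch
# arithmetic above on a dummy tensor. A 1080x1920 image with the default
# 30x30 patches yields (1080 / 30) * (1920 / 30) = 36 * 64 = 2304 patches,
# each flattened to 3 * 30 * 30 = 2700 values.
if __name__ == "__main__":
    import torch

    image_processor = FuyuImageProcessor()

    dummy_image = torch.zeros(1, 3, 1080, 1920)
    patches = image_processor.patchify_image(image=dummy_image)

    assert image_processor.get_num_patches(image_height=1080, image_width=1920) == 2304
    assert patches.shape == (1, 2304, 2700)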
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/nat/configuration_nat.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Neighborhood Attention Transformer model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) from ..deprecated._archive_maps import NAT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 class NatConfig(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`NatModel`]. It is used to instantiate a Nat model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Nat [shi-labs/nat-mini-in1k-224](https://huggingface.co/shi-labs/nat-mini-in1k-224) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: patch_size (`int`, *optional*, defaults to 4): The size (resolution) of each patch. NOTE: Only patch size of 4 is supported at the moment. num_channels (`int`, *optional*, defaults to 3): The number of input channels. embed_dim (`int`, *optional*, defaults to 64): Dimensionality of patch embedding. depths (`List[int]`, *optional*, defaults to `[3, 4, 6, 5]`): Number of layers in each level of the encoder. num_heads (`List[int]`, *optional*, defaults to `[2, 4, 8, 16]`): Number of attention heads in each layer of the Transformer encoder. kernel_size (`int`, *optional*, defaults to 7): Neighborhood Attention kernel size. mlp_ratio (`float`, *optional*, defaults to 3.0): Ratio of MLP hidden dimensionality to embedding dimensionality. qkv_bias (`bool`, *optional*, defaults to `True`): Whether or not a learnable bias should be added to the queries, keys and values. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings and encoder. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. drop_path_rate (`float`, *optional*, defaults to 0.1): Stochastic depth rate. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. layer_scale_init_value (`float`, *optional*, defaults to 0.0): The initial value for the layer scale. Disabled if <=0. out_features (`List[str]`, *optional*): If used as backbone, list of features to output. 
Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. (depending on how many stages the model has). If unset and `out_indices` is set, will default to the corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. out_indices (`List[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. If unset and `out_features` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. Example: ```python >>> from transformers import NatConfig, NatModel >>> # Initializing a Nat shi-labs/nat-mini-in1k-224 style configuration >>> configuration = NatConfig() >>> # Initializing a model (with random weights) from the shi-labs/nat-mini-in1k-224 style configuration >>> model = NatModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "nat" attribute_map = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs, ): super().__init__(**kwargs) self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_layers = len(depths) self.num_heads = num_heads self.kernel_size = kernel_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1)) self.layer_scale_init_value = layer_scale_init_value self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)] self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/nat/modeling_nat.py
# coding=utf-8 # Copyright 2022 SHI Labs and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Neighborhood Attention Transformer model.""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, OptionalDependencyNotAvailable, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_natten_available, logging, replace_return_docstrings, requires_backends, ) from ...utils.backbone_utils import BackboneMixin from .configuration_nat import NatConfig if is_natten_available(): from natten.functional import natten2dav, natten2dqkrpb else: def natten2dqkrpb(*args, **kwargs): raise OptionalDependencyNotAvailable() def natten2dav(*args, **kwargs): raise OptionalDependencyNotAvailable() logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "NatConfig" # Base docstring _CHECKPOINT_FOR_DOC = "shi-labs/nat-mini-in1k-224" _EXPECTED_OUTPUT_SHAPE = [1, 7, 7, 512] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "shi-labs/nat-mini-in1k-224" _IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat" from ..deprecated._archive_maps import NAT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 # drop_path and NatDropPath are from the timm library. @dataclass class NatEncoderOutput(ModelOutput): """ Nat encoder's outputs, with potential hidden states and attentions. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class NatModelOutput(ModelOutput): """ Nat model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed): Average pooling of the last layer hidden-state. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: torch.FloatTensor = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class NatImageClassifierOutput(ModelOutput): """ Nat outputs for image classification. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None


class NatEmbeddings(nn.Module):
    """
    Construct the patch and position embeddings.
    """

    def __init__(self, config):
        super().__init__()

        self.patch_embeddings = NatPatchEmbeddings(config)

        self.norm = nn.LayerNorm(config.embed_dim)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor]:
        embeddings = self.patch_embeddings(pixel_values)
        embeddings = self.norm(embeddings)
        embeddings = self.dropout(embeddings)

        return embeddings


class NatPatchEmbeddings(nn.Module):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, height, width, hidden_size)` to be consumed by a
    Transformer.
    """

    def __init__(self, config):
        super().__init__()
        patch_size = config.patch_size
        num_channels, hidden_size = config.num_channels, config.embed_dim
        self.num_channels = num_channels

        if patch_size == 4:
            pass
        else:
            # TODO: Support arbitrary patch sizes.
            raise ValueError("Nat only supports patch size of 4 at the moment.")

        self.projection = nn.Sequential(
            nn.Conv2d(self.num_channels, hidden_size // 2, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
            nn.Conv2d(hidden_size // 2, hidden_size, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
        )

    def forward(self, pixel_values: Optional[torch.FloatTensor]) -> torch.Tensor:
        _, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values matches the one set in the configuration."
            )
        embeddings = self.projection(pixel_values)
        embeddings = embeddings.permute(0, 2, 3, 1)

        return embeddings


class NatDownsampler(nn.Module):
    """
    Convolutional Downsampling Layer.

    Args:
        dim (`int`):
            Number of input channels.
        norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
            Normalization layer class.
    """

    def __init__(self, dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
        super().__init__()
        self.dim = dim
        self.reduction = nn.Conv2d(dim, 2 * dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
        self.norm = norm_layer(2 * dim)

    def forward(self, input_feature: torch.Tensor) -> torch.Tensor:
        input_feature = self.reduction(input_feature.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
        input_feature = self.norm(input_feature)
        return input_feature


# Copied from transformers.models.beit.modeling_beit.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
    however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
    layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
    argument.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output


# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->Nat
class NatDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)


class NeighborhoodAttention(nn.Module):
    def __init__(self, config, dim, num_heads, kernel_size):
        super().__init__()
        if dim % num_heads != 0:
            raise ValueError(
                f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
            )

        self.num_attention_heads = num_heads
        self.attention_head_size = int(dim / num_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.kernel_size = kernel_size

        # rpb holds the learnable relative positional biases; the same concept is used in Swin.
        self.rpb = nn.Parameter(torch.zeros(num_heads, (2 * self.kernel_size - 1), (2 * self.kernel_size - 1)))

        self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 3, 1, 2, 4)

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        query_layer = self.transpose_for_scores(self.query(hidden_states))
        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))

        # Apply the scale factor before computing attention weights. This is usually more efficient because the
        # attention weights are typically a bigger tensor than the query.
        # It gives identical results because scalars commute in matrix multiplication.
        query_layer = query_layer / math.sqrt(self.attention_head_size)

        # Compute NA between "query" and "key" to get the raw attention scores, and add relative positional biases.
        attention_scores = natten2dqkrpb(query_layer, key_layer, self.rpb, self.kernel_size, 1)

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs) context_layer = natten2dav(attention_probs, value_layer, self.kernel_size, 1) context_layer = context_layer.permute(0, 2, 3, 1, 4).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class NeighborhoodAttentionOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, dim) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class NeighborhoodAttentionModule(nn.Module): def __init__(self, config, dim, num_heads, kernel_size): super().__init__() self.self = NeighborhoodAttention(config, dim, num_heads, kernel_size) self.output = NeighborhoodAttentionOutput(config, dim) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class NatIntermediate(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, int(config.mlp_ratio * dim)) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class NatOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(int(config.mlp_ratio * dim), dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class NatLayer(nn.Module): def __init__(self, config, dim, num_heads, drop_path_rate=0.0): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.kernel_size = config.kernel_size self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.attention = NeighborhoodAttentionModule(config, dim, num_heads, kernel_size=self.kernel_size) self.drop_path = NatDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.intermediate = 
NatIntermediate(config, dim) self.output = NatOutput(config, dim) self.layer_scale_parameters = ( nn.Parameter(config.layer_scale_init_value * torch.ones((2, dim)), requires_grad=True) if config.layer_scale_init_value > 0 else None ) def maybe_pad(self, hidden_states, height, width): window_size = self.kernel_size pad_values = (0, 0, 0, 0, 0, 0) if height < window_size or width < window_size: pad_l = pad_t = 0 pad_r = max(0, window_size - width) pad_b = max(0, window_size - height) pad_values = (0, 0, pad_l, pad_r, pad_t, pad_b) hidden_states = nn.functional.pad(hidden_states, pad_values) return hidden_states, pad_values def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor, torch.Tensor]: batch_size, height, width, channels = hidden_states.size() shortcut = hidden_states hidden_states = self.layernorm_before(hidden_states) # pad hidden_states if they are smaller than kernel size hidden_states, pad_values = self.maybe_pad(hidden_states, height, width) _, height_pad, width_pad, _ = hidden_states.shape attention_outputs = self.attention(hidden_states, output_attentions=output_attentions) attention_output = attention_outputs[0] was_padded = pad_values[3] > 0 or pad_values[5] > 0 if was_padded: attention_output = attention_output[:, :height, :width, :].contiguous() if self.layer_scale_parameters is not None: attention_output = self.layer_scale_parameters[0] * attention_output hidden_states = shortcut + self.drop_path(attention_output) layer_output = self.layernorm_after(hidden_states) layer_output = self.output(self.intermediate(layer_output)) if self.layer_scale_parameters is not None: layer_output = self.layer_scale_parameters[1] * layer_output layer_output = hidden_states + self.drop_path(layer_output) layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,) return layer_outputs class NatStage(nn.Module): def __init__(self, config, dim, depth, num_heads, drop_path_rate, downsample): super().__init__() self.config = config self.dim = dim self.layers = nn.ModuleList( [ NatLayer( config=config, dim=dim, num_heads=num_heads, drop_path_rate=drop_path_rate[i], ) for i in range(depth) ] ) # patch merging layer if downsample is not None: self.downsample = downsample(dim=dim, norm_layer=nn.LayerNorm) else: self.downsample = None self.pointing = False def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: _, height, width, _ = hidden_states.size() for i, layer_module in enumerate(self.layers): layer_outputs = layer_module(hidden_states, output_attentions) hidden_states = layer_outputs[0] hidden_states_before_downsampling = hidden_states if self.downsample is not None: hidden_states = self.downsample(hidden_states_before_downsampling) stage_outputs = (hidden_states, hidden_states_before_downsampling) if output_attentions: stage_outputs += layer_outputs[1:] return stage_outputs class NatEncoder(nn.Module): def __init__(self, config): super().__init__() self.num_levels = len(config.depths) self.config = config dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] self.levels = nn.ModuleList( [ NatStage( config=config, dim=int(config.embed_dim * 2**i_layer), depth=config.depths[i_layer], num_heads=config.num_heads[i_layer], drop_path_rate=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])], downsample=NatDownsampler if (i_layer < self.num_levels - 1) else None, ) for i_layer in 
range(self.num_levels) ] ) def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, output_hidden_states_before_downsampling: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple, NatEncoderOutput]: all_hidden_states = () if output_hidden_states else None all_reshaped_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if output_hidden_states: # rearrange b h w c -> b c h w reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) for i, layer_module in enumerate(self.levels): layer_outputs = layer_module(hidden_states, output_attentions) hidden_states = layer_outputs[0] hidden_states_before_downsampling = layer_outputs[1] if output_hidden_states and output_hidden_states_before_downsampling: # rearrange b h w c -> b c h w reshaped_hidden_state = hidden_states_before_downsampling.permute(0, 3, 1, 2) all_hidden_states += (hidden_states_before_downsampling,) all_reshaped_hidden_states += (reshaped_hidden_state,) elif output_hidden_states and not output_hidden_states_before_downsampling: # rearrange b h w c -> b c h w reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) if output_attentions: all_self_attentions += layer_outputs[2:] if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return NatEncoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, reshaped_hidden_states=all_reshaped_hidden_states, ) class NatPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = NatConfig base_model_prefix = "nat" main_input_name = "pixel_values" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) NAT_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`NatConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ NAT_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare Nat Model transformer outputting raw hidden-states without any specific head on top.", NAT_START_DOCSTRING, ) class NatModel(NatPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) requires_backends(self, ["natten"]) self.config = config self.num_levels = len(config.depths) self.num_features = int(config.embed_dim * 2 ** (self.num_levels - 1)) self.embeddings = NatEmbeddings(config) self.encoder = NatEncoder(config) self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps) self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=NatModelOutput, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, NatModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output = self.embeddings(pixel_values) encoder_outputs = self.encoder( embedding_output, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = None if self.pooler is not None: pooled_output = self.pooler(sequence_output.flatten(1, 2).transpose(1, 2)) pooled_output = torch.flatten(pooled_output, 1) if not return_dict: output = (sequence_output, pooled_output) + encoder_outputs[1:] return output return NatModelOutput( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states, ) @add_start_docstrings( """ Nat Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet. 
""", NAT_START_DOCSTRING, ) class NatForImageClassification(NatPreTrainedModel): def __init__(self, config): super().__init__(config) requires_backends(self, ["natten"]) self.num_labels = config.num_labels self.nat = NatModel(config) # Classifier head self.classifier = ( nn.Linear(self.nat.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=NatImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, NatImageClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nat( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return NatImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states, ) @add_start_docstrings( "NAT backbone, to be used with frameworks like DETR and MaskFormer.", NAT_START_DOCSTRING, ) class NatBackbone(NatPreTrainedModel, BackboneMixin): def __init__(self, config): super().__init__(config) super()._init_backbone(config) requires_backends(self, ["natten"]) self.embeddings = NatEmbeddings(config) self.encoder = NatEncoder(config) self.num_features = [config.embed_dim] + [int(config.embed_dim * 2**i) for i in range(len(config.depths))] # Add layer norms to hidden states of out_features hidden_states_norms = {} for stage, num_channels in zip(self.out_features, self.channels): hidden_states_norms[stage] = nn.LayerNorm(num_channels) self.hidden_states_norms = nn.ModuleDict(hidden_states_norms) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return 
self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> BackboneOutput: """ Returns: Examples: ```python >>> from transformers import AutoImageProcessor, AutoBackbone >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224") >>> model = AutoBackbone.from_pretrained( ... "shi-labs/nat-mini-in1k-224", out_features=["stage1", "stage2", "stage3", "stage4"] ... ) >>> inputs = processor(image, return_tensors="pt") >>> outputs = model(**inputs) >>> feature_maps = outputs.feature_maps >>> list(feature_maps[-1].shape) [1, 512, 7, 7] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions embedding_output = self.embeddings(pixel_values) outputs = self.encoder( embedding_output, output_attentions=output_attentions, output_hidden_states=True, output_hidden_states_before_downsampling=True, return_dict=True, ) hidden_states = outputs.reshaped_hidden_states feature_maps = () for stage, hidden_state in zip(self.stage_names, hidden_states): if stage in self.out_features: # TODO can we simplify this? batch_size, num_channels, height, width = hidden_state.shape hidden_state = hidden_state.permute(0, 2, 3, 1).contiguous() hidden_state = hidden_state.view(batch_size, height * width, num_channels) hidden_state = self.hidden_states_norms[stage](hidden_state) hidden_state = hidden_state.view(batch_size, height, width, num_channels) hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous() feature_maps += (hidden_state,) if not return_dict: output = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, )
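# ---------------------------------------------------------------------------
# Usage sketch (not part of the library code above): a minimal, hedged example
# of running the NAT image classifier defined in this file. It assumes the
# `natten` package is installed and reuses the "shi-labs/nat-mini-in1k-224"
# checkpoint already referenced in this file's docstrings; any PIL image can
# be substituted for the COCO URL.
#
#     import requests
#     import torch
#     from PIL import Image
#     from transformers import AutoImageProcessor, NatForImageClassification
#
#     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#     image = Image.open(requests.get(url, stream=True).raw)
#
#     processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224")
#     model = NatForImageClassification.from_pretrained("shi-labs/nat-mini-in1k-224")
#
#     inputs = processor(image, return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])  # expected: "tiger cat"
# ---------------------------------------------------------------------------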
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/nat/__init__.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {"configuration_nat": ["NAT_PRETRAINED_CONFIG_ARCHIVE_MAP", "NatConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_nat"] = [ "NAT_PRETRAINED_MODEL_ARCHIVE_LIST", "NatForImageClassification", "NatModel", "NatPreTrainedModel", "NatBackbone", ] if TYPE_CHECKING: from .configuration_nat import NAT_PRETRAINED_CONFIG_ARCHIVE_MAP, NatConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nat import ( NAT_PRETRAINED_MODEL_ARCHIVE_LIST, NatBackbone, NatForImageClassification, NatModel, NatPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
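# Usage note (illustrative sketch, not part of the module): because of the
# `_LazyModule` registration above, importing this package is cheap; the heavy,
# torch-dependent submodule is only imported when one of its attributes is
# first accessed:
#
#     from transformers.models import nat
#
#     config_cls = nat.NatConfig  # loads configuration_nat on first access
#     model_cls = nat.NatModel    # loads modeling_nat (requires torch) on first access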
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/vitdet/modeling_vitdet.py
# coding=utf-8 # Copyright 2023 Meta AI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch ViTDet backbone.""" import collections.abc import math from typing import Dict, List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_outputs import BackboneOutput, BaseModelOutput from ...modeling_utils import PreTrainedModel from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_vitdet import VitDetConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "VitDetConfig" from ..deprecated._archive_maps import VITDET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 class VitDetEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) to be consumed by a Transformer. """ def __init__(self, config): super().__init__() image_size, patch_size = config.pretrain_image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches if config.use_absolute_position_embeddings: # Initialize absolute positional embedding with pretrain image size. num_positions = num_patches + 1 self.position_embeddings = nn.Parameter(torch.zeros(1, num_positions, config.hidden_size)) else: self.position_embeddings = None self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def get_absolute_positions(self, abs_pos_embeddings, has_cls_token, height, width): """ Calculate absolute positional embeddings. If needed, resize embeddings and remove cls_token dimension for the original embeddings. Args: abs_pos_embeddings (`torch.Tensor`): Absolute positional embeddings with (1, num_position, num_channels). has_cls_token (`bool`): If true, has 1 embedding in abs_pos_embeddings for cls token. height (`int`): Height of input image tokens. width (`int`): Width of input image tokens. Returns: Absolute positional embeddings after processing with shape (1, height, width, num_channels) """ if has_cls_token: abs_pos_embeddings = abs_pos_embeddings[:, 1:] num_position = abs_pos_embeddings.shape[1] size = int(math.sqrt(num_position)) # This is a constant and can be recorded as such in the ONNX export. 
if size * size != num_position: raise ValueError("Absolute position embeddings must be a square number.") if torch.jit.is_tracing() or (size != height or size != width): # nn.functional.interpolate is a noop in case size == height and size == width - we need to always capture this path with jit.trace. new_abs_pos_embeddings = nn.functional.interpolate( abs_pos_embeddings.reshape(1, size, size, -1).permute(0, 3, 1, 2), size=(height, width), mode="bicubic", align_corners=False, ) return new_abs_pos_embeddings.permute(0, 2, 3, 1) else: return abs_pos_embeddings.reshape(1, height, width, -1) def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: num_channels = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." f" Expected {self.num_channels} but got {num_channels}." ) embeddings = self.projection(pixel_values) if self.position_embeddings is not None: # (batch_size, num_channels, height, width) -> (batch_size, height, width, num_channels) embeddings = embeddings.permute(0, 2, 3, 1) # add position embeddings embeddings = embeddings + self.get_absolute_positions( self.position_embeddings, True, embeddings.shape[1], embeddings.shape[2] ) # (batch_size, height, width, num_channels) -> (batch_size, num_channels, height, width) embeddings = embeddings.permute(0, 3, 1, 2) return embeddings @torch.jit.script_if_tracing # nn.functional.interpolate's `size` needs to be dynamic. def get_rel_pos(q_size, k_size, rel_pos): """ Get relative positional embeddings according to the relative positions of query and key sizes. Args: q_size (`int`): Size of query q. k_size (`int`): Size of key k. rel_pos (`torch.Tensor`): Relative position embeddings (num_embeddings, num_channels). Returns: Extracted positional embeddings according to relative positions. """ max_rel_dist = int(2 * max(q_size, k_size) - 1) # Interpolate rel pos if needed. if rel_pos.shape[0] != max_rel_dist: # Interpolate rel position embeddings. rel_pos_resized = nn.functional.interpolate( rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size=max_rel_dist, mode="linear", ) rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) else: rel_pos_resized = rel_pos # Scale the coords with short length if shapes for q and k are different. q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) return rel_pos_resized[relative_coords.long()] def add_decomposed_relative_positions(attn, queries, rel_pos_h, rel_pos_w, q_size, k_size): """ Calculate decomposed Relative Positional Embeddings as introduced in [MViT2](https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py). Args: attn (`torch.Tensor`): Attention map. queries (`torch.Tensor`): Query q in the attention layer with shape (batch_size, queries_height * queries_width, num_channels). rel_pos_h (`torch.Tensor`): Relative position embeddings (Lh, num_channels) for height axis. rel_pos_w (`torch.Tensor`): Relative position embeddings (Lw, num_channels) for width axis. q_size (`Tuple[int]`): Spatial sequence size of query q with (queries_height, queries_width). k_size (`Tuple[int]`]): Spatial sequence size of key k with (keys_height, keys_width). Returns: attn (Tensor): attention map with added relative positional embeddings. 
""" queries_height, queries_width = q_size keys_height, keys_width = k_size relative_height = get_rel_pos(queries_height, keys_height, rel_pos_h) relative_width = get_rel_pos(queries_width, keys_width, rel_pos_w) batch_size, _, dim = queries.shape r_q = queries.reshape(batch_size, queries_height, queries_width, dim) relative_height = torch.einsum("bhwc,hkc->bhwk", r_q, relative_height) relative_weight = torch.einsum("bhwc,wkc->bhwk", r_q, relative_width) attn = ( attn.view(batch_size, queries_height, queries_width, keys_height, keys_width) + relative_height[:, :, :, :, None] + relative_weight[:, :, :, None, :] ).view(batch_size, queries_height * queries_width, keys_height * keys_width) return attn class VitDetAttention(nn.Module): """Multi-head Attention block with relative position embeddings.""" def __init__(self, config, input_size=None): """ Args: config (`VitDetConfig`): Model configuration. input_size (`Tuple[int]`, *optional*): Input resolution, only required in case relative position embeddings are added. """ super().__init__() dim = config.hidden_size num_heads = config.num_attention_heads self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim**-0.5 self.qkv = nn.Linear(dim, dim * 3, bias=config.qkv_bias) self.proj = nn.Linear(dim, dim) self.use_relative_position_embeddings = config.use_relative_position_embeddings if self.use_relative_position_embeddings: # initialize relative positional embeddings self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim)) self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim)) def forward(self, hidden_state, output_attentions=False): batch_size, height, width, _ = hidden_state.shape # qkv with shape (3, batch_size, num_heads, height * width, num_channels) qkv = self.qkv(hidden_state).reshape(batch_size, height * width, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) # queries, keys and values have shape (batch_size * num_heads, height * width, num_channels) queries, keys, values = qkv.reshape(3, batch_size * self.num_heads, height * width, -1).unbind(0) attention_scores = (queries * self.scale) @ keys.transpose(-2, -1) if self.use_relative_position_embeddings: attention_scores = add_decomposed_relative_positions( attention_scores, queries, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width) ) attention_probs = attention_scores.softmax(dim=-1) hidden_state = attention_probs @ values hidden_state = hidden_state.view(batch_size, self.num_heads, height, width, -1) hidden_state = hidden_state.permute(0, 2, 3, 1, 4) hidden_state = hidden_state.reshape(batch_size, height, width, -1) hidden_state = self.proj(hidden_state) if output_attentions: attention_probs = attention_probs.reshape( batch_size, self.num_heads, attention_probs.shape[-2], attention_probs.shape[-1] ) outputs = (hidden_state, attention_probs) else: outputs = (hidden_state,) return outputs # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... 
I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.beit.modeling_beit.BeitDropPath class VitDetDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return "p={}".format(self.drop_prob) class VitDetLayerNorm(nn.Module): """ A LayerNorm variant, popularized by Transformers, that performs point-wise mean and variance normalization over the channel dimension for inputs that have shape (batch_size, channels, height, width). https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 """ def __init__(self, normalized_shape, eps=1e-6): super().__init__() self.weight = nn.Parameter(torch.ones(normalized_shape)) self.bias = nn.Parameter(torch.zeros(normalized_shape)) self.eps = eps self.normalized_shape = (normalized_shape,) def forward(self, x): u = x.mean(1, keepdim=True) s = (x - u).pow(2).mean(1, keepdim=True) x = (x - u) / torch.sqrt(s + self.eps) x = self.weight[:, None, None] * x + self.bias[:, None, None] return x class VitDetResBottleneckBlock(nn.Module): """ The standard bottleneck residual block without the last activation layer. It contains 3 conv layers with kernels 1x1, 3x3, 1x1. """ def __init__(self, config, in_channels, out_channels, bottleneck_channels): """ Args: config (`VitDetConfig`): Model configuration. in_channels (`int`): Number of input channels. out_channels (`int`): Number of output channels. bottleneck_channels (`int`): Number of output channels for the 3x3 "bottleneck" conv layers. """ super().__init__() self.conv1 = nn.Conv2d(in_channels, bottleneck_channels, 1, bias=False) self.norm1 = VitDetLayerNorm(bottleneck_channels) self.act1 = ACT2FN[config.hidden_act] self.conv2 = nn.Conv2d(bottleneck_channels, bottleneck_channels, 3, padding=1, bias=False) self.norm2 = VitDetLayerNorm(bottleneck_channels) self.act2 = ACT2FN[config.hidden_act] self.conv3 = nn.Conv2d(bottleneck_channels, out_channels, 1, bias=False) self.norm3 = VitDetLayerNorm(out_channels) def forward(self, x): out = x for layer in self.children(): out = layer(out) out = x + out return out class VitDetMlp(nn.Module): def __init__(self, config, in_features: int, hidden_features: int) -> None: super().__init__() self.fc1 = nn.Linear(in_features, hidden_features) self.act = ACT2FN[config.hidden_act] self.fc2 = nn.Linear(hidden_features, in_features) self.drop = nn.Dropout(config.dropout_prob) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x def window_partition(hidden_state, window_size): """ Partition into non-overlapping windows with padding if needed. Args: hidden_state (`torch.Tensor`): Input tokens with [batch_size, height, width, num_channels]. 
window_size (`int`): Window size. Returns: `tuple(torch.FloatTensor)` comprising various elements: - windows: windows after partition with [batch_size * num_windows, window_size, window_size, num_channels]. - (padded_height, padded_width): padded height and width before partition """ batch_size, height, width, num_channels = hidden_state.shape pad_height = (window_size - height % window_size) % window_size pad_width = (window_size - width % window_size) % window_size # Noop in case pad_width == 0 and pad_height == 0. hidden_state = nn.functional.pad(hidden_state, (0, 0, 0, pad_width, 0, pad_height)) padded_height, padded_width = height + pad_height, width + pad_width hidden_state = hidden_state.view( batch_size, padded_height // window_size, window_size, padded_width // window_size, window_size, num_channels ) windows = hidden_state.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels) return windows, (padded_height, padded_width) def window_unpartition(windows, window_size, pad_height_width, height_width): """ Window unpartition into original sequences and removing padding. Args: windows (`torch.Tensor`): Input tokens with [batch_size * num_windows, window_size, window_size, num_channels]. window_size (`int`): Window size. pad_height_width (`Tuple[int]`): Padded height and width (padded_height, padded_width). height_width (`Tuple[int]`): Original height and width before padding. Returns: hidden_state: unpartitioned sequences with [batch_size, height, width, num_channels]. """ padded_height, padded_width = pad_height_width height, width = height_width batch_size = windows.shape[0] // (padded_height * padded_width // window_size // window_size) hidden_state = windows.view( batch_size, padded_height // window_size, padded_width // window_size, window_size, window_size, -1 ) hidden_state = hidden_state.permute(0, 1, 3, 2, 4, 5).contiguous() hidden_state = hidden_state.view(batch_size, padded_height, padded_width, -1) # We always have height <= padded_height and width <= padded_width hidden_state = hidden_state[:, :height, :width, :].contiguous() return hidden_state class VitDetLayer(nn.Module): """This corresponds to the Block class in the original implementation.""" def __init__( self, config: VitDetConfig, drop_path_rate: float = 0, window_size: int = 0, use_residual_block: bool = False ) -> None: super().__init__() dim = config.hidden_size input_size = (config.image_size // config.patch_size, config.image_size // config.patch_size) self.norm1 = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.attention = VitDetAttention( config, input_size=input_size if window_size == 0 else (window_size, window_size) ) self.drop_path = VitDetDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.norm2 = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.mlp = VitDetMlp(config=config, in_features=dim, hidden_features=int(dim * config.mlp_ratio)) self.window_size = window_size self.use_residual_block = use_residual_block if self.use_residual_block: # Use a residual block with bottleneck channel as dim // 2 self.residual = VitDetResBottleneckBlock( config=config, in_channels=dim, out_channels=dim, bottleneck_channels=dim // 2, ) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: hidden_states = hidden_states.permute(0, 2, 3, 1) shortcut = hidden_states hidden_states = self.norm1(hidden_states) # Window partition if 
self.window_size > 0: height, width = hidden_states.shape[1], hidden_states.shape[2] hidden_states, pad_height_width = window_partition(hidden_states, self.window_size) self_attention_outputs = self.attention( hidden_states, output_attentions=output_attentions, ) hidden_states = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # Reverse window partition if self.window_size > 0: hidden_states = window_unpartition(hidden_states, self.window_size, pad_height_width, (height, width)) # first residual connection hidden_states = shortcut + self.drop_path(hidden_states) hidden_states = hidden_states + self.drop_path(self.mlp(self.norm2(hidden_states))) hidden_states = hidden_states.permute(0, 3, 1, 2) if self.use_residual_block: hidden_states = self.residual(hidden_states) outputs = (hidden_states,) + outputs return outputs class VitDetEncoder(nn.Module): def __init__(self, config: VitDetConfig) -> None: super().__init__() self.config = config depth = config.num_hidden_layers # stochastic depth decay rule drop_path_rate = [x.item() for x in torch.linspace(0, config.drop_path_rate, depth)] layers = [] for i in range(depth): layers.append( VitDetLayer( config, drop_path_rate=drop_path_rate[i], window_size=config.window_size if i in config.window_block_indices else 0, use_residual_block=i in config.residual_block_indices, ) ) self.layer = nn.ModuleList(layers) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, layer_head_mask, output_attentions, ) else: layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) def caffe2_msra_fill(module: nn.Module) -> None: """ Initialize `module.weight` using the "MSRAFill" implemented in Caffe2. Also initializes `module.bias` to 0. Source: https://detectron2.readthedocs.io/en/latest/_modules/fvcore/nn/weight_init.html. Args: module (torch.nn.Module): module to initialize. """ nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu") if module.bias is not None: nn.init.constant_(module.bias, 0) class VitDetPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = VitDetConfig base_model_prefix = "vitdet" main_input_name = "pixel_values" supports_gradient_checkpointing = True _no_split_modules = [] def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid # `trunc_normal_cpu` not implemented in `half` issues module.weight.data = nn.init.trunc_normal_( module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range ).to(module.weight.dtype) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, VitDetEmbeddings): module.position_embeddings.data = nn.init.trunc_normal_( module.position_embeddings.data.to(torch.float32), mean=0.0, std=self.config.initializer_range, ).to(module.position_embeddings.dtype) elif isinstance(module, VitDetAttention) and self.config.use_relative_position_embeddings: module.rel_pos_h.data = nn.init.trunc_normal_( module.rel_pos_h.data.to(torch.float32), mean=0.0, std=self.config.initializer_range, ) module.rel_pos_w.data = nn.init.trunc_normal_( module.rel_pos_w.data.to(torch.float32), mean=0.0, std=self.config.initializer_range, ) elif isinstance(module, VitDetResBottleneckBlock): for layer in [module.conv1, module.conv2, module.conv3]: caffe2_msra_fill(layer) for layer in [module.norm1, module.norm2]: layer.weight.data.fill_(1.0) layer.bias.data.zero_() # zero init last norm layer. module.norm3.weight.data.zero_() module.norm3.bias.data.zero_() VITDET_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`VitDetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ VITDET_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`] for details. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The bare VitDet Transformer model outputting raw hidden-states without any specific head on top.", VITDET_START_DOCSTRING, ) class VitDetModel(VitDetPreTrainedModel): def __init__(self, config: VitDetConfig): super().__init__(config) self.config = config self.embeddings = VitDetEmbeddings(config) self.encoder = VitDetEncoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> VitDetEmbeddings: return self.embeddings.projection def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(VITDET_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: """ Returns: Examples: ```python >>> from transformers import VitDetConfig, VitDetModel >>> import torch >>> config = VitDetConfig() >>> model = VitDetModel(config) >>> pixel_values = torch.randn(1, 3, 224, 224) >>> with torch.no_grad(): ... outputs = model(pixel_values) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 768, 14, 14] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(pixel_values) encoder_outputs = self.encoder( embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings( """ ViTDet backbone, to be used with frameworks like Mask R-CNN. 
""", VITDET_START_DOCSTRING, ) class VitDetBackbone(VitDetPreTrainedModel, BackboneMixin): def __init__(self, config): super().__init__(config) super()._init_backbone(config) self.embeddings = VitDetEmbeddings(config) self.encoder = VitDetEncoder(config) self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)] # initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> VitDetEmbeddings: return self.embeddings.projection @add_start_docstrings_to_model_forward(VITDET_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> BackboneOutput: """ Returns: Examples: ```python >>> from transformers import VitDetConfig, VitDetBackbone >>> import torch >>> config = VitDetConfig() >>> model = VitDetBackbone(config) >>> pixel_values = torch.randn(1, 3, 224, 224) >>> with torch.no_grad(): ... outputs = model(pixel_values) >>> feature_maps = outputs.feature_maps >>> list(feature_maps[-1].shape) [1, 768, 14, 14] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions embedding_output = self.embeddings(pixel_values) outputs = self.encoder( embedding_output, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict, ) hidden_states = outputs.hidden_states if return_dict else outputs[1] feature_maps = () for stage, hidden_state in zip(self.stage_names, hidden_states): if stage in self.out_features: feature_maps += (hidden_state,) if not return_dict: if output_hidden_states: output = (feature_maps,) + outputs[1:] else: output = (feature_maps,) + outputs[2:] return output return BackboneOutput( feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, )
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/vitdet/configuration_vitdet.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ VitDet model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) from ..deprecated._archive_maps import VITDET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 class VitDetConfig(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`VitDetModel`]. It is used to instantiate an VitDet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the VitDet [google/vitdet-base-patch16-224](https://huggingface.co/google/vitdet-base-patch16-224) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. mlp_ratio (`int`, *optional*, defaults to 4): Ratio of mlp hidden dim to embedding dim. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. pretrain_image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image during pretraining. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. drop_path_rate (`float`, *optional*, defaults to 0.0): Stochastic depth rate. window_block_indices (`List[int]`, *optional*, defaults to `[]`): List of indices of blocks that should have window attention instead of regular global self-attention. 
residual_block_indices (`List[int]`, *optional*, defaults to `[]`): List of indices of blocks that should have an extra residual block after the MLP. use_absolute_position_embeddings (`bool`, *optional*, defaults to `True`): Whether to add absolute position embeddings to the patch embeddings. use_relative_position_embeddings (`bool`, *optional*, defaults to `False`): Whether to add relative position embeddings to the attention maps. window_size (`int`, *optional*, defaults to 0): The size of the attention window. out_features (`List[str]`, *optional*): If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. (depending on how many stages the model has). If unset and `out_indices` is set, will default to the corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. out_indices (`List[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. If unset and `out_features` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. Example: ```python >>> from transformers import VitDetConfig, VitDetModel >>> # Initializing a VitDet configuration >>> configuration = VitDetConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = VitDetModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "vitdet" def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4, hidden_act="gelu", dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-6, image_size=224, pretrain_image_size=224, patch_size=16, num_channels=3, qkv_bias=True, drop_path_rate=0.0, window_block_indices=[], residual_block_indices=[], use_absolute_position_embeddings=True, use_relative_position_embeddings=False, window_size=0, out_features=None, out_indices=None, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.mlp_ratio = mlp_ratio self.hidden_act = hidden_act self.dropout_prob = dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.pretrain_image_size = pretrain_image_size self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias self.drop_path_rate = drop_path_rate self.window_block_indices = window_block_indices self.residual_block_indices = residual_block_indices self.use_absolute_position_embeddings = use_absolute_position_embeddings self.use_relative_position_embeddings = use_relative_position_embeddings self.window_size = window_size self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, self.num_hidden_layers + 1)] self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
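A small sketch of the `out_features`/`out_indices` alignment performed by `get_aligned_output_features_output_indices` at the end of `__init__`; index 0 is the `"stem"` and index `i` maps to `f"stage{i}"` (printed values assume the default 12-layer config):

```python
from transformers import VitDetConfig

# Passing only out_indices derives the matching out_features (and vice versa).
config = VitDetConfig(out_indices=[4, 12])
print(config.stage_names[:3])  # ['stem', 'stage1', 'stage2']
print(config.out_features)     # ['stage4', 'stage12']

# With neither argument set, the backbone defaults to the last stage only.
default_config = VitDetConfig()
print(default_config.out_features)  # ['stage12']
```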
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/vitdet/__init__.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _import_structure = {"configuration_vitdet": ["VITDET_PRETRAINED_CONFIG_ARCHIVE_MAP", "VitDetConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_vitdet"] = [ "VITDET_PRETRAINED_MODEL_ARCHIVE_LIST", "VitDetModel", "VitDetPreTrainedModel", "VitDetBackbone", ] if TYPE_CHECKING: from .configuration_vitdet import VITDET_PRETRAINED_CONFIG_ARCHIVE_MAP, VitDetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vitdet import ( VITDET_PRETRAINED_MODEL_ARCHIVE_LIST, VitDetBackbone, VitDetModel, VitDetPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
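For illustration, a sketch of what this lazy structure buys at import time; the `sys.modules` checks assume a fresh interpreter in which nothing has imported the modeling file yet:

```python
import sys

import transformers.models.vitdet as vitdet

# Importing the package is cheap: the torch-dependent submodule is not loaded yet.
print("transformers.models.vitdet.modeling_vitdet" in sys.modules)  # False

# The first attribute access resolves through _LazyModule and imports it on demand.
_ = vitdet.VitDetModel
print("transformers.models.vitdet.modeling_vitdet" in sys.modules)  # True
```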
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/perceiver/configuration_perceiver.py
# coding=utf-8
# Copyright Deepmind and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Perceiver model configuration"""

from collections import OrderedDict
from typing import Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class PerceiverConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`PerceiverModel`]. It is used to instantiate a
    Perceiver model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Perceiver
    [deepmind/language-perceiver](https://huggingface.co/deepmind/language-perceiver) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_latents (`int`, *optional*, defaults to 256):
            The number of latents.
        d_latents (`int`, *optional*, defaults to 1280):
            Dimension of the latent embeddings.
        d_model (`int`, *optional*, defaults to 768):
            Dimension of the inputs. Should only be provided in case [*PerceiverTextPreprocessor*] is used or no
            preprocessor is provided.
        num_blocks (`int`, *optional*, defaults to 1):
            Number of blocks in the Transformer encoder.
        num_self_attends_per_block (`int`, *optional*, defaults to 26):
            The number of self-attention layers per block.
        num_self_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each self-attention layer in the Transformer encoder.
        num_cross_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each cross-attention layer in the Transformer encoder.
        qk_channels (`int`, *optional*):
            Dimension to project the queries + keys before applying attention in the cross-attention and
            self-attention layers of the encoder. Will default to preserving the dimension of the queries if not
            specified.
        v_channels (`int`, *optional*):
            Dimension to project the values before applying attention in the cross-attention and self-attention
            layers of the encoder. Will default to preserving the dimension of the queries if not specified.
        cross_attention_shape_for_attention (`str`, *optional*, defaults to `"kv"`):
            Dimension to use when downsampling the queries and keys in the cross-attention layer of the encoder.
        self_attention_widening_factor (`int`, *optional*, defaults to 1):
            Dimension of the feed-forward layer in the self-attention layers of the Transformer encoder.
        cross_attention_widening_factor (`int`, *optional*, defaults to 1):
            Dimension of the feed-forward layer in the cross-attention layer of the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        use_query_residual (`bool`, *optional*, defaults to `True`):
            Whether to add a query residual in the cross-attention layer of the encoder.
        vocab_size (`int`, *optional*, defaults to 262):
            Vocabulary size for the masked language modeling model.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that the masked language modeling model might ever be used with. Typically set
            this to something large just in case (e.g., 512 or 1024 or 2048).
        image_size (`int`, *optional*, defaults to 56):
            Size of the images after preprocessing, for [`PerceiverForImageClassificationLearned`].
        train_size (`List[int]`, *optional*, defaults to `[368, 496]`):
            Training size of the images for the optical flow model.
        num_frames (`int`, *optional*, defaults to 16):
            Number of video frames used for the multimodal autoencoding model.
        audio_samples_per_frame (`int`, *optional*, defaults to 1920):
            Number of audio samples per frame for the multimodal autoencoding model.
        samples_per_patch (`int`, *optional*, defaults to 16):
            Number of audio samples per patch when preprocessing the audio for the multimodal autoencoding model.
        output_shape (`List[int]`, *optional*, defaults to `[1, 16, 224, 224]`):
            Shape of the output (batch_size, num_frames, height, width) for the video decoder queries of the
            multimodal autoencoding model. This excludes the channel dimension.
        output_num_channels (`int`, *optional*, defaults to 512):
            Number of output channels for each modality decoder.
Example: ```python >>> from transformers import PerceiverModel, PerceiverConfig >>> # Initializing a Perceiver deepmind/language-perceiver style configuration >>> configuration = PerceiverConfig() >>> # Initializing a model from the deepmind/language-perceiver style configuration >>> model = PerceiverModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "perceiver" def __init__( self, num_latents=256, d_latents=1280, d_model=768, num_blocks=1, num_self_attends_per_block=26, num_self_attention_heads=8, num_cross_attention_heads=8, qk_channels=None, v_channels=None, cross_attention_shape_for_attention="kv", self_attention_widening_factor=1, cross_attention_widening_factor=1, hidden_act="gelu", attention_probs_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1e-12, use_query_residual=True, vocab_size=262, max_position_embeddings=2048, image_size=56, train_size=[368, 496], num_frames=16, audio_samples_per_frame=1920, samples_per_patch=16, output_shape=[1, 16, 224, 224], output_num_channels=512, _label_trainable_num_channels=1024, **kwargs, ): super().__init__(**kwargs) self.num_latents = num_latents self.d_latents = d_latents self.d_model = d_model self.num_blocks = num_blocks self.num_self_attends_per_block = num_self_attends_per_block self.num_self_attention_heads = num_self_attention_heads self.num_cross_attention_heads = num_cross_attention_heads self.qk_channels = qk_channels self.v_channels = v_channels self.cross_attention_shape_for_attention = cross_attention_shape_for_attention self.self_attention_widening_factor = self_attention_widening_factor self.cross_attention_widening_factor = cross_attention_widening_factor self.hidden_act = hidden_act self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.use_query_residual = use_query_residual # masked language modeling attributes self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings # image classification attributes self.image_size = image_size # flow attributes self.train_size = train_size # multimodal autoencoding attributes self.num_frames = num_frames self.audio_samples_per_frame = audio_samples_per_frame self.samples_per_patch = samples_per_patch self.output_shape = output_shape self.output_num_channels = output_num_channels self._label_trainable_num_channels = _label_trainable_num_channels class PerceiverOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} else: dynamic_axis = {0: "batch", 1: "sequence"} return OrderedDict( [ ("inputs", dynamic_axis), ("attention_mask", dynamic_axis), ] ) @property def atol_for_validation(self) -> float: return 1e-4 def generate_dummy_inputs( self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40, ) -> Mapping[str, Any]: # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(preprocessor, PreTrainedTokenizerBase): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension( batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, 
num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX token_to_add = preprocessor.num_special_tokens_to_add(is_pair) seq_length = compute_effective_axis_dimension( seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add ) # Generate dummy inputs according to compute batch and sequence dummy_input = [" ".join(["a"]) * seq_length] * batch_size inputs = dict(preprocessor(dummy_input, return_tensors=framework)) inputs["inputs"] = inputs.pop("input_ids") return inputs elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width) inputs = dict(preprocessor(images=dummy_input, return_tensors=framework)) inputs["inputs"] = inputs.pop("pixel_values") return inputs else: raise ValueError( "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
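A hedged sketch of driving `generate_dummy_inputs` with the byte-level tokenizer; the checkpoint name comes from this file's docstring, and `task="default"` is an assumption:

```python
from transformers import PerceiverConfig, PerceiverTokenizer
from transformers.models.perceiver.configuration_perceiver import PerceiverOnnxConfig

config = PerceiverConfig()
onnx_config = PerceiverOnnxConfig(config, task="default")
tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

# -1 triggers the fixed fallback dimensions (batch 2, sequence 8 including specials).
dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=-1, seq_length=-1)
print(sorted(dummy.keys()))  # ['attention_mask', 'inputs'] (input_ids is renamed)
```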
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/perceiver/feature_extraction_perceiver.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for Perceiver.""" import warnings from ...utils import logging from .image_processing_perceiver import PerceiverImageProcessor logger = logging.get_logger(__name__) class PerceiverFeatureExtractor(PerceiverImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use PerceiverImageProcessor instead.", FutureWarning, ) super().__init__(*args, **kwargs)
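As a quick illustration of the deprecation path, the alias still constructs a working processor (it subclasses `PerceiverImageProcessor`) but emits a `FutureWarning`:

```python
import warnings

from transformers import PerceiverFeatureExtractor, PerceiverImageProcessor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy = PerceiverFeatureExtractor()  # deprecated alias
print(any(issubclass(w.category, FutureWarning) for w in caught))  # True

# New code should instantiate the image processor directly.
processor = PerceiverImageProcessor()
print(isinstance(legacy, PerceiverImageProcessor))  # True
```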
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/perceiver/tokenization_perceiver.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization class for Perceiver.""" from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) class PerceiverTokenizer(PreTrainedTokenizer): """ Construct a Perceiver tokenizer. The Perceiver simply uses raw bytes utf-8 encoding. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. bos_token (`str`, *optional*, defaults to `"[BOS]"`): The BOS token (reserved in the vocab, but not actually used). eos_token (`str`, *optional*, defaults to `"[EOS]"`): The end of sequence token (reserved in the vocab, but not actually used). <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> mask_token (`str`, *optional*, defaults to `"[MASK]"`): The MASK token, useful for masked language modeling. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The CLS token (reserved in the vocab, but not actually used). sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from two sequences. 
""" model_input_names = ["input_ids", "attention_mask"] def __init__( self, pad_token="[PAD]", bos_token="[BOS]", eos_token="[EOS]", mask_token="[MASK]", cls_token="[CLS]", sep_token="[SEP]", model_max_length=2048, **kwargs, ) -> None: pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token mask_token = AddedToken(mask_token, lstrip=False, rstrip=False) if isinstance(mask_token, str) else mask_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token self._utf_vocab_size = 2**8 # utf is 8 bits # Since these tokens are not part of the vocabulary, we manually add them self._added_tokens_decoder: Dict[str, int] = { 0: pad_token, 1: bos_token, 2: eos_token, 3: mask_token, 4: cls_token, 5: sep_token, } self._num_special_tokens = len(self._added_tokens_decoder) super().__init__( pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, mask_token=mask_token, cls_token=cls_token, sep_token=sep_token, model_max_length=model_max_length, **kwargs, ) def get_vocab(self) -> Dict[str, int]: vocab = {} for i in range(self._utf_vocab_size): token = chr(i) vocab[token] = i + self._num_special_tokens vocab.update(self.added_tokens_encoder) return vocab @property def vocab_size(self): return self._utf_vocab_size def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) # normal case: some special tokens if token_ids_1 is None: return [1] + [0] * len(token_ids_0) + [1] return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks. A sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
""" if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] else: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + token_ids_1 + [self.sep_token_id] def _tokenize(self, text: str) -> List[str]: """Take as input a string and return a list of strings (tokens) for words/sub-words""" tokens = [chr(i) for i in text.encode("utf-8")] return tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" if len(token) != 1: token_id = self.unk_token_id else: token_id = ord(token) + self._num_special_tokens return token_id def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" token = chr(index - self._num_special_tokens) return token # TODO @ArthurZ refactor this as well.... def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" bstring = b"" for token in tokens: if token in self.added_tokens_encoder: tok_string = str(token).encode("utf-8") else: tok_string = bytes([ord(token)]) bstring += tok_string string = bstring.decode("utf-8", errors="replace") return string # PerceiverTokenizer has no vocab file def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: return ()
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/perceiver/convert_perceiver_haiku_to_pytorch.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Perceiver checkpoints originally implemented in Haiku.""" import argparse import json import pickle from pathlib import Path import haiku as hk import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( PerceiverConfig, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverImageProcessor, PerceiverTokenizer, ) from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def prepare_img(): # We will verify our results on an image of a dog url = "https://storage.googleapis.com/perceiver_io/dalmation.jpg" im = Image.open(requests.get(url, stream=True).raw) return im def rename_keys(state_dict, architecture): for name in list(state_dict): param = state_dict.pop(name) # PREPROCESSORS # rename text preprocessor embeddings (for MLM model) name = name.replace("embed/embeddings", "input_preprocessor.embeddings.weight") if name.startswith("trainable_position_encoding/pos_embs"): name = name.replace( "trainable_position_encoding/pos_embs", "input_preprocessor.position_embeddings.weight" ) # rename image preprocessor embeddings (for image classification model with learned position embeddings) name = name.replace("image_preprocessor/~/conv2_d/w", "input_preprocessor.convnet_1x1.weight") name = name.replace("image_preprocessor/~/conv2_d/b", "input_preprocessor.convnet_1x1.bias") name = name.replace( "image_preprocessor/~_build_network_inputs/trainable_position_encoding/pos_embs", "input_preprocessor.position_embeddings.position_embeddings", ) name = name.replace( "image_preprocessor/~_build_network_inputs/position_encoding_projector/linear/w", "input_preprocessor.positions_projection.weight", ) name = name.replace( "image_preprocessor/~_build_network_inputs/position_encoding_projector/linear/b", "input_preprocessor.positions_projection.bias", ) # rename image preprocessor embeddings (for image classification model with conv processing) if "counter" in name or "hidden" in name: continue name = name.replace( "image_preprocessor/~/conv2_d_downsample/~/conv/w", "input_preprocessor.convnet.conv.weight" ) name = name.replace( "image_preprocessor/~/conv2_d_downsample/~/batchnorm/offset", "input_preprocessor.convnet.batchnorm.bias" ) name = name.replace( "image_preprocessor/~/conv2_d_downsample/~/batchnorm/scale", "input_preprocessor.convnet.batchnorm.weight" ) name = name.replace( "image_preprocessor/~/conv2_d_downsample/~/batchnorm/~/mean_ema/average", "input_preprocessor.convnet.batchnorm.running_mean", ) name = name.replace( "image_preprocessor/~/conv2_d_downsample/~/batchnorm/~/var_ema/average", "input_preprocessor.convnet.batchnorm.running_var", ) # rename image preprocessor embeddings (for optical flow model) name = 
name.replace("image_preprocessor/patches_linear/b", "input_preprocessor.conv_after_patches.bias") name = name.replace("image_preprocessor/patches_linear/w", "input_preprocessor.conv_after_patches.weight") # rename multimodal preprocessor embeddings name = name.replace("multimodal_preprocessor/audio_mask_token/pos_embs", "input_preprocessor.mask.audio") name = name.replace("multimodal_preprocessor/audio_padding/pos_embs", "input_preprocessor.padding.audio") name = name.replace("multimodal_preprocessor/image_mask_token/pos_embs", "input_preprocessor.mask.image") name = name.replace("multimodal_preprocessor/image_padding/pos_embs", "input_preprocessor.padding.image") name = name.replace("multimodal_preprocessor/label_mask_token/pos_embs", "input_preprocessor.mask.label") name = name.replace("multimodal_preprocessor/label_padding/pos_embs", "input_preprocessor.padding.label") # DECODERS # rename prefix of decoders # multimodal autoencoding model name = name.replace( "multimodal_decoder/~/basic_decoder/cross_attention/", "decoder.decoder.decoding_cross_attention." ) name = name.replace("multimodal_decoder/~decoder_query/audio_padding/pos_embs", "decoder.padding.audio") name = name.replace("multimodal_decoder/~decoder_query/image_padding/pos_embs", "decoder.padding.image") name = name.replace("multimodal_decoder/~decoder_query/label_padding/pos_embs", "decoder.padding.label") name = name.replace("multimodal_decoder/~/basic_decoder/output/b", "decoder.decoder.final_layer.bias") name = name.replace("multimodal_decoder/~/basic_decoder/output/w", "decoder.decoder.final_layer.weight") if architecture == "multimodal_autoencoding": name = name.replace( "classification_decoder/~/basic_decoder/~/trainable_position_encoding/pos_embs", "decoder.modalities.label.decoder.output_position_encodings.position_embeddings", ) # flow model name = name.replace( "flow_decoder/~/basic_decoder/cross_attention/", "decoder.decoder.decoding_cross_attention." ) name = name.replace("flow_decoder/~/basic_decoder/output/w", "decoder.decoder.final_layer.weight") name = name.replace("flow_decoder/~/basic_decoder/output/b", "decoder.decoder.final_layer.bias") # image models name = name.replace( "classification_decoder/~/basic_decoder/~/trainable_position_encoding/pos_embs", "decoder.decoder.output_position_encodings.position_embeddings", ) name = name.replace( "basic_decoder/~/trainable_position_encoding/pos_embs", "decoder.output_position_encodings.position_embeddings", ) name = name.replace( "classification_decoder/~/basic_decoder/cross_attention/", "decoder.decoder.decoding_cross_attention." 
)
        name = name.replace("classification_decoder/~/basic_decoder/output/b", "decoder.decoder.final_layer.bias")
        name = name.replace("classification_decoder/~/basic_decoder/output/w", "decoder.decoder.final_layer.weight")
        name = name.replace("classification_decoder/~/basic_decoder/~/", "decoder.decoder.")
        name = name.replace("basic_decoder/cross_attention/", "decoder.decoding_cross_attention.")
        name = name.replace("basic_decoder/~/", "decoder.")

        # POSTPROCESSORS
        name = name.replace(
            "projection_postprocessor/linear/b", "output_postprocessor.modalities.image.classifier.bias"
        )
        name = name.replace(
            "projection_postprocessor/linear/w", "output_postprocessor.modalities.image.classifier.weight"
        )
        name = name.replace(
            "classification_postprocessor/linear/b", "output_postprocessor.modalities.label.classifier.bias"
        )
        name = name.replace(
            "classification_postprocessor/linear/w", "output_postprocessor.modalities.label.classifier.weight"
        )
        name = name.replace("audio_postprocessor/linear/b", "output_postprocessor.modalities.audio.classifier.bias")
        name = name.replace("audio_postprocessor/linear/w", "output_postprocessor.modalities.audio.classifier.weight")

        # PERCEIVER MODEL

        # rename latent embeddings
        name = name.replace("perceiver_encoder/~/trainable_position_encoding/pos_embs", "embeddings.latents")
        # rename latent embeddings (for multimodal model)
        name = name.replace("encoder/~/trainable_position_encoding/pos_embs", "embeddings.latents")

        # rename prefixes
        if name.startswith("perceiver_encoder/~/"):
            if "self_attention" in name:
                suffix = "self_attends."
            else:
                suffix = ""
            name = name.replace("perceiver_encoder/~/", "encoder." + suffix)
        if name.startswith("encoder/~/"):
            if "self_attention" in name:
                suffix = "self_attends."
            else:
                suffix = ""
            name = name.replace("encoder/~/", "encoder."
+ suffix) # rename layernorm parameters if "offset" in name: name = name.replace("offset", "bias") if "scale" in name: name = name.replace("scale", "weight") # in HuggingFace, the layernorm in between attention + MLP is just called "layernorm" # rename layernorm in between attention + MLP of cross-attention if "cross_attention" in name and "layer_norm_2" in name: name = name.replace("layer_norm_2", "layernorm") # rename layernorm in between attention + MLP of self-attention if "self_attention" in name and "layer_norm_1" in name: name = name.replace("layer_norm_1", "layernorm") # in HuggingFace, the layernorms for queries + keys are called "layernorm1" and "layernorm2" if "cross_attention" in name and "layer_norm_1" in name: name = name.replace("layer_norm_1", "attention.self.layernorm2") if "cross_attention" in name and "layer_norm" in name: name = name.replace("layer_norm", "attention.self.layernorm1") if "self_attention" in name and "layer_norm" in name: name = name.replace("layer_norm", "attention.self.layernorm1") # rename special characters by dots name = name.replace("-", ".") name = name.replace("/", ".") # rename keys, queries, values and output of attention layers if ("cross_attention" in name or "self_attention" in name) and "mlp" not in name: if "linear.b" in name: name = name.replace("linear.b", "self.query.bias") if "linear.w" in name: name = name.replace("linear.w", "self.query.weight") if "linear_1.b" in name: name = name.replace("linear_1.b", "self.key.bias") if "linear_1.w" in name: name = name.replace("linear_1.w", "self.key.weight") if "linear_2.b" in name: name = name.replace("linear_2.b", "self.value.bias") if "linear_2.w" in name: name = name.replace("linear_2.w", "self.value.weight") if "linear_3.b" in name: name = name.replace("linear_3.b", "output.dense.bias") if "linear_3.w" in name: name = name.replace("linear_3.w", "output.dense.weight") if "self_attention_" in name: name = name.replace("self_attention_", "") if "self_attention" in name: name = name.replace("self_attention", "0") # rename dense layers of 2-layer MLP if "mlp" in name: if "linear.b" in name: name = name.replace("linear.b", "dense1.bias") if "linear.w" in name: name = name.replace("linear.w", "dense1.weight") if "linear_1.b" in name: name = name.replace("linear_1.b", "dense2.bias") if "linear_1.w" in name: name = name.replace("linear_1.w", "dense2.weight") # finally, TRANSPOSE if kernel and not embedding layer, and set value if name[-6:] == "weight" and "embeddings" not in name: param = np.transpose(param) # if batchnorm, we need to squeeze it if "batchnorm" in name: param = np.squeeze(param) if "embedding_decoder" not in name: state_dict["perceiver." + name] = torch.from_numpy(param) else: state_dict[name] = torch.from_numpy(param) @torch.no_grad() def convert_perceiver_checkpoint(pickle_file, pytorch_dump_folder_path, architecture="MLM"): """ Copy/paste/tweak model's weights to our Perceiver structure. 
""" # load parameters as FlatMapping data structure with open(pickle_file, "rb") as f: checkpoint = pickle.loads(f.read()) state = None if isinstance(checkpoint, dict) and architecture in [ "image_classification", "image_classification_fourier", "image_classification_conv", ]: # the image classification_conv checkpoint also has batchnorm states (running_mean and running_var) params = checkpoint["params"] state = checkpoint["state"] else: params = checkpoint # turn into initial state dict state_dict = {} for scope_name, parameters in hk.data_structures.to_mutable_dict(params).items(): for param_name, param in parameters.items(): state_dict[scope_name + "/" + param_name] = param if state is not None: # add state variables for scope_name, parameters in hk.data_structures.to_mutable_dict(state).items(): for param_name, param in parameters.items(): state_dict[scope_name + "/" + param_name] = param # rename keys rename_keys(state_dict, architecture=architecture) # load HuggingFace model config = PerceiverConfig() subsampling = None repo_id = "huggingface/label-files" if architecture == "MLM": config.qk_channels = 8 * 32 config.v_channels = 1280 model = PerceiverForMaskedLM(config) elif "image_classification" in architecture: config.num_latents = 512 config.d_latents = 1024 config.d_model = 512 config.num_blocks = 8 config.num_self_attends_per_block = 6 config.num_cross_attention_heads = 1 config.num_self_attention_heads = 8 config.qk_channels = None config.v_channels = None # set labels config.num_labels = 1000 filename = "imagenet-1k-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} if architecture == "image_classification": config.image_size = 224 model = PerceiverForImageClassificationLearned(config) elif architecture == "image_classification_fourier": config.d_model = 261 model = PerceiverForImageClassificationFourier(config) elif architecture == "image_classification_conv": config.d_model = 322 model = PerceiverForImageClassificationConvProcessing(config) else: raise ValueError(f"Architecture {architecture} not supported") elif architecture == "optical_flow": config.num_latents = 2048 config.d_latents = 512 config.d_model = 322 config.num_blocks = 1 config.num_self_attends_per_block = 24 config.num_self_attention_heads = 16 config.num_cross_attention_heads = 1 model = PerceiverForOpticalFlow(config) elif architecture == "multimodal_autoencoding": config.num_latents = 28 * 28 * 1 config.d_latents = 512 config.d_model = 704 config.num_blocks = 1 config.num_self_attends_per_block = 8 config.num_self_attention_heads = 8 config.num_cross_attention_heads = 1 config.num_labels = 700 # define dummy inputs + subsampling (as each forward pass is only on a chunk of image + audio data) images = torch.randn((1, 16, 3, 224, 224)) audio = torch.randn((1, 30720, 1)) nchunks = 128 image_chunk_size = np.prod((16, 224, 224)) // nchunks audio_chunk_size = audio.shape[1] // config.samples_per_patch // nchunks # process the first chunk chunk_idx = 0 subsampling = { "image": torch.arange(image_chunk_size * chunk_idx, image_chunk_size * (chunk_idx + 1)), "audio": torch.arange(audio_chunk_size * chunk_idx, audio_chunk_size * (chunk_idx + 1)), "label": None, } model = PerceiverForMultimodalAutoencoding(config) # set labels filename = "kinetics700-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, 
repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} else: raise ValueError(f"Architecture {architecture} not supported") model.eval() # load weights model.load_state_dict(state_dict) # prepare dummy input input_mask = None if architecture == "MLM": tokenizer = PerceiverTokenizer.from_pretrained("/Users/NielsRogge/Documents/Perceiver/Tokenizer files") text = "This is an incomplete sentence where some words are missing." encoding = tokenizer(text, padding="max_length", return_tensors="pt") # mask " missing.". Note that the model performs much better if the masked chunk starts with a space. encoding.input_ids[0, 51:60] = tokenizer.mask_token_id inputs = encoding.input_ids input_mask = encoding.attention_mask elif architecture in ["image_classification", "image_classification_fourier", "image_classification_conv"]: image_processor = PerceiverImageProcessor() image = prepare_img() encoding = image_processor(image, return_tensors="pt") inputs = encoding.pixel_values elif architecture == "optical_flow": inputs = torch.randn(1, 2, 27, 368, 496) elif architecture == "multimodal_autoencoding": images = torch.randn((1, 16, 3, 224, 224)) audio = torch.randn((1, 30720, 1)) inputs = {"image": images, "audio": audio, "label": torch.zeros((images.shape[0], 700))} # forward pass if architecture == "multimodal_autoencoding": outputs = model(inputs=inputs, attention_mask=input_mask, subsampled_output_points=subsampling) else: outputs = model(inputs=inputs, attention_mask=input_mask) logits = outputs.logits # verify logits if not isinstance(logits, dict): print("Shape of logits:", logits.shape) else: for k, v in logits.items(): print(f"Shape of logits of modality {k}", v.shape) if architecture == "MLM": expected_slice = torch.tensor( [[-11.8336, -11.6850, -11.8483], [-12.8149, -12.5863, -12.7904], [-12.8440, -12.6410, -12.8646]] ) assert torch.allclose(logits[0, :3, :3], expected_slice) masked_tokens_predictions = logits[0, 51:60].argmax(dim=-1).tolist() expected_list = [38, 115, 111, 121, 121, 111, 116, 109, 52] assert masked_tokens_predictions == expected_list print("Greedy predictions:") print(masked_tokens_predictions) print() print("Predicted string:") print(tokenizer.decode(masked_tokens_predictions)) elif architecture in ["image_classification", "image_classification_fourier", "image_classification_conv"]: print("Predicted class:", model.config.id2label[logits.argmax(-1).item()]) # Finally, save files Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--pickle_file", type=str, default=None, required=True, help="Path to local pickle file of a Perceiver checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model directory, provided as a string.", ) parser.add_argument( "--architecture", default="MLM", type=str, help=""" Architecture, provided as a string. One of 'MLM', 'image_classification', image_classification_fourier', image_classification_fourier', 'optical_flow' or 'multimodal_autoencoding'. """, ) args = parser.parse_args() convert_perceiver_checkpoint(args.pickle_file, args.pytorch_dump_folder_path, args.architecture)
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/perceiver/modeling_perceiver.py
# coding=utf-8 # Copyright 2021 Deepmind and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Perceiver model.""" import abc import math from dataclasses import dataclass from functools import reduce from operator import __add__ from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutputWithCrossAttentions from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, meshgrid, prune_linear_layer from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_perceiver import PerceiverConfig ModalitySizeType = Mapping[str, int] PreprocessorOutputType = Tuple[torch.Tensor, Optional[torch.Tensor], torch.Tensor] PreprocessorType = Callable[..., PreprocessorOutputType] PostprocessorType = Callable[..., Any] logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "deepmind/language-perceiver" _CONFIG_FOR_DOC = "PerceiverConfig" from ..deprecated._archive_maps import PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 @dataclass class PerceiverModelOutput(ModelOutput): """ Base class for Perceiver base model's outputs, with potential hidden states, attentions and cross-attentions. Args: logits (`torch.FloatTensor` of shape `(batch_size, num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ logits: torch.FloatTensor = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class PerceiverDecoderOutput(ModelOutput): """ Base class for Perceiver decoder outputs, with potential cross-attentions. Args: logits (`torch.FloatTensor` of shape `(batch_size, num_labels)`): Output of the basic decoder. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ logits: torch.FloatTensor = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class PerceiverMaskedLMOutput(ModelOutput): """ Base class for Perceiver's masked language model outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Masked language modeling (MLM) loss. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, num_latents, num_latents)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class PerceiverClassifierOutput(ModelOutput): """ Base class for Perceiver's outputs of sequence/image classification models, optical flow and multimodal autoencoding. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). 
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None class PerceiverEmbeddings(nn.Module): """Construct the latent embeddings.""" def __init__(self, config): super().__init__() self.latents = nn.Parameter(torch.randn(config.num_latents, config.d_latents)) def forward(self, batch_size: int): return self.latents.expand(batch_size, -1, -1) # Thanks, Phil Wang class PerceiverSelfAttention(nn.Module): """Multi-headed {cross, self}-attention. Can be used both in the encoder as well as in the decoder.""" def __init__( self, config, is_cross_attention=False, qk_channels=None, v_channels=None, num_heads=1, q_dim=None, kv_dim=None, ): super().__init__() self.num_heads = num_heads # Q and K must have the same number of channels. # Default to preserving Q's input's shape. if qk_channels is None: qk_channels = q_dim # V's num_channels determines the shape of the output of QKV-attention. # Default to the same number of channels used in the key-query operation. 
if v_channels is None: v_channels = qk_channels if qk_channels % num_heads != 0: raise ValueError(f"qk_channels ({qk_channels}) must be divisible by num_heads ({num_heads}).") if v_channels % num_heads != 0: raise ValueError(f"v_channels ({v_channels}) must be divisible by num_heads ({num_heads}).") self.qk_channels = qk_channels self.v_channels = v_channels self.qk_channels_per_head = self.qk_channels // num_heads self.v_channels_per_head = self.v_channels // num_heads # Layer normalization self.layernorm1 = nn.LayerNorm(q_dim) self.layernorm2 = nn.LayerNorm(kv_dim) if is_cross_attention else nn.Identity() # Projection matrices self.query = nn.Linear(q_dim, qk_channels) self.key = nn.Linear(kv_dim, qk_channels) self.value = nn.Linear(kv_dim, v_channels) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x, channels_per_head): new_x_shape = x.size()[:-1] + (self.num_heads, channels_per_head) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs: Optional[torch.FloatTensor] = None, inputs_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: hidden_states = self.layernorm1(hidden_states) inputs = self.layernorm2(inputs) # Project queries, keys and values to a common feature dimension. If this is instantiated as a cross-attention module, # the keys and values come from the inputs; the attention mask needs to be such that the inputs's non-relevant tokens are not attended to. is_cross_attention = inputs is not None queries = self.query(hidden_states) if is_cross_attention: keys = self.key(inputs) values = self.value(inputs) attention_mask = inputs_mask else: keys = self.key(hidden_states) values = self.value(hidden_states) # Reshape channels for multi-head attention. # We reshape from (batch_size, time, channels) to (batch_size, num_heads, time, channels per head) queries = self.transpose_for_scores(queries, self.qk_channels_per_head) keys = self.transpose_for_scores(keys, self.qk_channels_per_head) values = self.transpose_for_scores(values, self.v_channels_per_head) # Take the dot product between the queries and keys to get the raw attention scores. attention_scores = torch.matmul(queries, keys.transpose(-1, -2)) batch_size, num_heads, seq_len, q_head_dim = queries.shape _, _, _, v_head_dim = values.shape hiddens = self.num_heads * v_head_dim attention_scores = attention_scores / math.sqrt(q_head_dim) if attention_mask is not None: # Apply the attention mask (precomputed for all layers in PerceiverModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, values) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (hiddens,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class PerceiverSelfOutput(nn.Module): def __init__(self, config, input_channels, output_channels): super().__init__() self.dense = nn.Linear(input_channels, output_channels) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) return hidden_states class PerceiverAttention(nn.Module): """Attention module, including a dense block.""" def __init__( self, config, is_cross_attention=False, qk_channels=None, v_channels=None, num_heads=1, q_dim=None, kv_dim=None, use_query_residual=True, ): super().__init__() # MultiHead attention if is_cross_attention and qk_channels is None: if config.cross_attention_shape_for_attention == "q": qk_channels = q_dim elif config.cross_attention_shape_for_attention == "kv": qk_channels = kv_dim else: raise ValueError( f"Unknown value {config.cross_attention_shape_for_attention} for " "cross_attention_shape_for_attention." ) else: if qk_channels is None: qk_channels = q_dim if v_channels is None: v_channels = qk_channels self.self = PerceiverSelfAttention( config, is_cross_attention=is_cross_attention, qk_channels=qk_channels, v_channels=v_channels, num_heads=num_heads, q_dim=q_dim, kv_dim=kv_dim, ) # dense block output_channels = None if is_cross_attention: output_channels = q_dim else: if output_channels is None: output_channels = v_channels self.output = PerceiverSelfOutput(config, input_channels=self.self.v_channels, output_channels=output_channels) self.use_query_residual = use_query_residual self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs: Optional[torch.FloatTensor] = None, inputs_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask, head_mask, inputs, inputs_mask, output_attentions, ) # Output projection attention_output = self.output(self_outputs[0]) # Optionally include a residual to the original queries. # Consider omitting the residual if the semantics of query and output # are different, e.g. if queries are positions and outputs are pixels. 
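        # For instance, PerceiverOpticalFlowDecoder below is instantiated with use_query_residual=False, since its
        # queries encode pixel positions while its outputs are flow vectors.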
if self.use_query_residual: attention_output = attention_output + hidden_states outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class PerceiverMLP(nn.Module): """A Transformer-style dense module to follow attention.""" def __init__(self, config, input_size, widening_factor): super().__init__() self.dense1 = nn.Linear(input_size, widening_factor * input_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act self.dense2 = nn.Linear(widening_factor * input_size, input_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense1(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.dense2(hidden_states) return hidden_states class PerceiverLayer(nn.Module): def __init__( self, config, is_cross_attention=False, qk_channels=None, v_channels=None, num_heads=1, q_dim=None, kv_dim=None, widening_factor=4, use_query_residual=True, ): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = PerceiverAttention( config, is_cross_attention=is_cross_attention, qk_channels=qk_channels, v_channels=v_channels, num_heads=num_heads, q_dim=q_dim, kv_dim=kv_dim, use_query_residual=use_query_residual, ) self.layernorm = nn.LayerNorm(q_dim) self.mlp = PerceiverMLP(config, input_size=q_dim, widening_factor=widening_factor) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs: Optional[torch.FloatTensor] = None, inputs_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: attention_outputs = self.attention( hidden_states, attention_mask, head_mask, inputs, inputs_mask, output_attentions, ) attention_output = attention_outputs[0] outputs = attention_outputs[1:] # add attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) layer_output = layer_output + attention_output # residual connection outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): layer_output = self.layernorm(attention_output) layer_output = self.mlp(layer_output) return layer_output class PerceiverEncoder(nn.Module): """The Perceiver Encoder: a scalable, fully attentional encoder.""" def __init__(self, config, kv_dim=None): super().__init__() self.config = config # Check that we can use multihead-attention with these shapes. if config.d_latents % config.num_self_attention_heads != 0: raise ValueError( f"num_z_channels ({config.d_latents}) must be divisible by" f" num_self_attend_heads ({config.num_self_attention_heads})." ) if config.d_latents % config.num_cross_attention_heads != 0: raise ValueError( f"num_z_channels ({config.d_latents}) must be divisible by" f" num_cross_attend_heads ({config.num_cross_attention_heads})." ) # Construct the cross attention layer. self.cross_attention = PerceiverLayer( config, is_cross_attention=True, qk_channels=config.qk_channels, v_channels=config.v_channels, num_heads=config.num_cross_attention_heads, q_dim=config.d_latents, kv_dim=kv_dim, widening_factor=config.cross_attention_widening_factor, use_query_residual=config.use_query_residual, ) # Construct a single block of self-attention layers. 
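        # (Each repetition of this block in forward() reuses the same layers, i.e. weights are shared across blocks.)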
# We get deeper architectures by applying this block more than once. self_attention_layers = [] for _ in range(config.num_self_attends_per_block): layer = PerceiverLayer( config, is_cross_attention=False, qk_channels=config.qk_channels, v_channels=config.v_channels, num_heads=config.num_self_attention_heads, q_dim=config.d_latents, kv_dim=config.d_latents, widening_factor=config.self_attention_widening_factor, ) self_attention_layers.append(layer) self.self_attends = nn.ModuleList(self_attention_layers) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs: Optional[torch.FloatTensor] = None, inputs_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple, BaseModelOutputWithCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions else None # Apply the cross-attention between the latents (hidden_states) and inputs: layer_outputs = self.cross_attention( hidden_states, attention_mask=attention_mask, head_mask=None, inputs=inputs, inputs_mask=inputs_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_cross_attentions = all_cross_attentions + (layer_outputs[1],) # Apply the block of self-attention layers more than once: for _ in range(self.config.num_blocks): for i, layer_module in enumerate(self.self_attends): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module( hidden_states, attention_mask=attention_mask, head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None ) return BaseModelOutputWithCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class PerceiverPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = PerceiverConfig base_model_prefix = "perceiver" main_input_name = "inputs" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif hasattr(module, "latents"): module.latents.data.normal_(mean=0.0, std=self.config.initializer_range) elif hasattr(module, "position_embeddings") and isinstance(module, PerceiverTrainablePositionEncoding): module.position_embeddings.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.ParameterDict): for modality in module.keys(): module[modality].data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) PERCEIVER_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`PerceiverConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ PERCEIVER_MODEL_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`PerceiverConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. decoder (*DecoderType*, *optional*): Optional decoder to use to decode the latent representation of the encoder. Examples include *transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder*, *transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder*, *transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder*. input_preprocessor (*PreprocessorType*, *optional*): Optional input preprocessor to use. Examples include *transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor*, *transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor*, *transformers.models.perceiver.modeling_perceiver.PerceiverTextPreprocessor*, *transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor*. output_postprocessor (*PostprocessorType*, *optional*): Optional output postprocessor to use. 
Examples include *transformers.models.perceiver.modeling_perceiver.PerceiverImagePostprocessor*, *transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor*, *transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor*, *transformers.models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor*, *transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor*. Note that you can define your own decoders, preprocessors and/or postprocessors to fit your use-case. """ PERCEIVER_INPUTS_DOCSTRING = r""" Args: inputs (`torch.FloatTensor`): Inputs to the perceiver. Can be anything: images, text, audio, video, etc. attention_mask (`torch.FloatTensor` of shape `{0}`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( """The Perceiver: a scalable, fully attentional architecture.""", PERCEIVER_MODEL_START_DOCSTRING, ) class PerceiverModel(PerceiverPreTrainedModel): def __init__( self, config, decoder=None, input_preprocessor: PreprocessorType = None, output_postprocessor: PostprocessorType = None, ): super().__init__(config) self.config = config self.input_preprocessor = input_preprocessor self.output_postprocessor = output_postprocessor self.embeddings = PerceiverEmbeddings(config) self.encoder = PerceiverEncoder( config, kv_dim=input_preprocessor.num_channels if input_preprocessor is not None else config.d_model ) self.decoder = decoder # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.latents def set_input_embeddings(self, value): self.embeddings.latents = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("(batch_size, sequence_length)")) @replace_return_docstrings(output_type=PerceiverModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, inputs: torch.FloatTensor, attention_mask: Optional[torch.FloatTensor] = None, subsampled_output_points: Optional[Dict[str, torch.Tensor]] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, PerceiverModelOutput]: r""" Returns: Examples: ```python >>> from transformers import PerceiverConfig, PerceiverTokenizer, PerceiverImageProcessor, PerceiverModel >>> from transformers.models.perceiver.modeling_perceiver import ( ... PerceiverTextPreprocessor, ... PerceiverImagePreprocessor, ... PerceiverClassificationDecoder, ... ) >>> import torch >>> import requests >>> from PIL import Image >>> # EXAMPLE 1: using the Perceiver to classify texts >>> # - we define a TextPreprocessor, which can be used to embed tokens >>> # - we define a ClassificationDecoder, which can be used to decode the >>> # final hidden states of the latents to classification logits >>> # using trainable position embeddings >>> config = PerceiverConfig() >>> preprocessor = PerceiverTextPreprocessor(config) >>> decoder = PerceiverClassificationDecoder( ... config, ... num_channels=config.d_latents, ... trainable_position_encoding_kwargs=dict(num_channels=config.d_latents, index_dims=1), ... use_query_residual=True, ... ) >>> model = PerceiverModel(config, input_preprocessor=preprocessor, decoder=decoder) >>> # you can then do a forward pass as follows: >>> tokenizer = PerceiverTokenizer() >>> text = "hello world" >>> inputs = tokenizer(text, return_tensors="pt").input_ids >>> with torch.no_grad(): ... outputs = model(inputs=inputs) >>> logits = outputs.logits >>> list(logits.shape) [1, 2] >>> # to train, one can train the model using standard cross-entropy: >>> criterion = torch.nn.CrossEntropyLoss() >>> labels = torch.tensor([1]) >>> loss = criterion(logits, labels) >>> # EXAMPLE 2: using the Perceiver to classify images >>> # - we define an ImagePreprocessor, which can be used to embed images >>> config = PerceiverConfig(image_size=224) >>> preprocessor = PerceiverImagePreprocessor( ... config, ... prep_type="conv1x1", ... spatial_downsample=1, ... out_channels=256, ... position_encoding_type="trainable", ... concat_or_add_pos="concat", ... project_pos_dim=256, ... trainable_position_encoding_kwargs=dict( ... num_channels=256, ... index_dims=config.image_size**2, ... ), ... ) >>> model = PerceiverModel( ... config, ... input_preprocessor=preprocessor, ... decoder=PerceiverClassificationDecoder( ... config, ... num_channels=config.d_latents, ... trainable_position_encoding_kwargs=dict(num_channels=config.d_latents, index_dims=1), ... use_query_residual=True, ... ), ... ) >>> # you can then do a forward pass as follows: >>> image_processor = PerceiverImageProcessor() >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = image_processor(image, return_tensors="pt").pixel_values >>> with torch.no_grad(): ... 
outputs = model(inputs=inputs) >>> logits = outputs.logits >>> list(logits.shape) [1, 2] >>> # to train, one can train the model using standard cross-entropy: >>> criterion = torch.nn.CrossEntropyLoss() >>> labels = torch.tensor([1]) >>> loss = criterion(logits, labels) ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.input_preprocessor is not None: inputs, modality_sizes, inputs_without_pos = self.input_preprocessor(inputs) else: modality_sizes = None inputs_without_pos = None if inputs.size()[-1] != self.config.d_model: raise ValueError( f"Last dimension of the inputs: {inputs.size()[-1]} doesn't correspond to config.d_model:" f" {self.config.d_model}. Make sure to set config.d_model appropriately." ) batch_size, seq_length, _ = inputs.size() device = inputs.device # If no attention mask is provided, make them all ones if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length), device=device) # Make the attention mask broadcastable to [batch_size, num_heads, seq_length, seq_length] extended_attention_mask = self.invert_attention_mask(attention_mask) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_blocks x num_heads] # and head_mask is converted to shape [num_blocks x batch x num_heads x N x N] head_mask = self.get_head_mask(head_mask, self.config.num_blocks * self.config.num_self_attends_per_block) embedding_output = self.embeddings(batch_size=batch_size) encoder_outputs = self.encoder( embedding_output, attention_mask=None, head_mask=head_mask, inputs=inputs, inputs_mask=extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] logits = None if self.decoder: if subsampled_output_points is not None: output_modality_sizes = { "audio": subsampled_output_points["audio"].shape[0], "image": subsampled_output_points["image"].shape[0], "label": 1, } else: output_modality_sizes = modality_sizes decoder_query = self.decoder.decoder_query( inputs, modality_sizes, inputs_without_pos, subsampled_points=subsampled_output_points ) decoder_outputs = self.decoder( decoder_query, z=sequence_output, query_mask=extended_attention_mask, output_attentions=output_attentions, ) logits = decoder_outputs.logits # add cross-attentions of decoder if output_attentions and decoder_outputs.cross_attentions is not None: if return_dict: encoder_outputs.cross_attentions = ( encoder_outputs.cross_attentions + decoder_outputs.cross_attentions ) else: encoder_outputs = encoder_outputs + decoder_outputs.cross_attentions if self.output_postprocessor: logits = self.output_postprocessor(logits, modality_sizes=output_modality_sizes) if not return_dict: if logits is not None: return (logits, sequence_output) + encoder_outputs[1:] else: return (sequence_output,) + encoder_outputs[1:] return PerceiverModelOutput( logits=logits, last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @add_start_docstrings("""Example use of Perceiver for masked language modeling.""", 
PERCEIVER_START_DOCSTRING) class PerceiverForMaskedLM(PerceiverPreTrainedModel): def __init__(self, config: PerceiverConfig): super().__init__(config) text_preprocessor = PerceiverTextPreprocessor(config) trainable_position_encoding_kwargs_decoder = { "num_channels": text_preprocessor.num_channels, "index_dims": config.max_position_embeddings, } self.perceiver = PerceiverModel( config, input_preprocessor=text_preprocessor, decoder=PerceiverBasicDecoder( config, output_num_channels=config.d_latents, output_index_dims=config.max_position_embeddings, # we need to define the seq_len of the inputs beforehand num_channels=text_preprocessor.num_channels, qk_channels=8 * 32, v_channels=text_preprocessor.num_channels, num_heads=8, use_query_residual=False, final_project=False, trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder, ), ) self.embedding_decoder = PerceiverEmbeddingDecoder(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=PerceiverMaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, inputs: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, input_ids: Optional[torch.Tensor] = None, ) -> Union[Tuple, PerceiverMaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import AutoTokenizer, PerceiverForMaskedLM >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("deepmind/language-perceiver") >>> model = PerceiverForMaskedLM.from_pretrained("deepmind/language-perceiver") >>> # training >>> text = "This is an incomplete sentence where some words are missing." >>> inputs = tokenizer(text, padding="max_length", return_tensors="pt") >>> # mask " missing." >>> inputs["input_ids"][0, 52:61] = tokenizer.mask_token_id >>> labels = tokenizer(text, padding="max_length", return_tensors="pt").input_ids >>> outputs = model(**inputs, labels=labels) >>> loss = outputs.loss >>> round(loss.item(), 2) 19.87 >>> logits = outputs.logits >>> list(logits.shape) [1, 2048, 262] >>> # inference >>> text = "This is an incomplete sentence where some words are missing." >>> encoding = tokenizer(text, padding="max_length", return_tensors="pt") >>> # mask bytes corresponding to " missing.". Note that the model performs much better if the masked span starts with a space. >>> encoding["input_ids"][0, 52:61] = tokenizer.mask_token_id >>> # forward pass >>> with torch.no_grad(): ... outputs = model(**encoding) >>> logits = outputs.logits >>> list(logits.shape) [1, 2048, 262] >>> masked_tokens_predictions = logits[0, 52:61].argmax(dim=-1).tolist() >>> tokenizer.decode(masked_tokens_predictions) ' missing.' 
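        >>> # note: the Perceiver tokenizer operates directly on raw UTF-8 bytes, so the 262 output classes
        >>> # correspond to the 256 possible byte values plus the tokenizer's special tokens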
```""" if inputs is not None and input_ids is not None: raise ValueError("You cannot use both `inputs` and `input_ids`") elif inputs is None and input_ids is not None: inputs = input_ids return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.perceiver( inputs=inputs, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = self.embedding_decoder( outputs.logits if return_dict else outputs[0], embedding_layer=self.perceiver.input_preprocessor.embeddings ) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return PerceiverMaskedLMOutput( loss=masked_lm_loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) @add_start_docstrings("""Example use of Perceiver for text classification.""", PERCEIVER_START_DOCSTRING) class PerceiverForSequenceClassification(PerceiverPreTrainedModel): def __init__(self, config): super().__init__(config) trainable_position_encoding_kwargs_decoder = {"num_channels": config.d_latents, "index_dims": 1} self.num_labels = config.num_labels self.perceiver = PerceiverModel( config, input_preprocessor=PerceiverTextPreprocessor(config), decoder=PerceiverClassificationDecoder( config, num_channels=config.d_latents, trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder, use_query_residual=True, ), ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, inputs: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, input_ids: Optional[torch.Tensor] = None, ) -> Union[Tuple, PerceiverClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
Returns: Examples: ```python >>> from transformers import AutoTokenizer, PerceiverForSequenceClassification >>> tokenizer = AutoTokenizer.from_pretrained("deepmind/language-perceiver") >>> model = PerceiverForSequenceClassification.from_pretrained("deepmind/language-perceiver") >>> text = "hello world" >>> inputs = tokenizer(text, return_tensors="pt").input_ids >>> outputs = model(inputs=inputs) >>> logits = outputs.logits >>> list(logits.shape) [1, 2] ```""" if inputs is not None and input_ids is not None: raise ValueError("You cannot use both `inputs` and `input_ids`") elif inputs is None and input_ids is not None: inputs = input_ids return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.perceiver( inputs=inputs, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = outputs.logits if return_dict else outputs[0] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return PerceiverClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) @add_start_docstrings( """ Example use of Perceiver for image classification, for tasks such as ImageNet. This model uses learned position embeddings. In other words, this model is not given any privileged information about the structure of images. As shown in the paper, this model can achieve a top-1 accuracy of 72.7 on ImageNet. [`PerceiverForImageClassificationLearned`] uses [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`] (with `prep_type="conv1x1"`) to preprocess the input images, and [`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation of [`PerceiverModel`] into classification logits. 
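Note that the number of trainable position embeddings in the preprocessor is tied to `config.image_size` (one embedding
per pixel, i.e. `image_size**2` of them), so inputs are expected at that resolution.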
""", PERCEIVER_START_DOCSTRING, ) class PerceiverForImageClassificationLearned(PerceiverPreTrainedModel): def __init__(self, config): super().__init__(config) trainable_position_encoding_kwargs_preprocessor = {"num_channels": 256, "index_dims": config.image_size**2} trainable_position_encoding_kwargs_decoder = {"num_channels": config.d_latents, "index_dims": 1} self.num_labels = config.num_labels self.perceiver = PerceiverModel( config, input_preprocessor=PerceiverImagePreprocessor( config, prep_type="conv1x1", spatial_downsample=1, out_channels=256, position_encoding_type="trainable", concat_or_add_pos="concat", project_pos_dim=256, trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_preprocessor, ), decoder=PerceiverClassificationDecoder( config, num_channels=config.d_latents, trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder, use_query_residual=True, ), ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, inputs: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, pixel_values: Optional[torch.Tensor] = None, ) -> Union[Tuple, PerceiverClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, PerceiverForImageClassificationLearned
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("deepmind/vision-perceiver-learned")
        >>> model = PerceiverForImageClassificationLearned.from_pretrained("deepmind/vision-perceiver-learned")

        >>> inputs = image_processor(images=image, return_tensors="pt").pixel_values
        >>> outputs = model(inputs=inputs)
        >>> logits = outputs.logits
        >>> list(logits.shape)
        [1, 1000]

        >>> # model predicts one of the 1000 ImageNet classes
        >>> predicted_class_idx = logits.argmax(-1).item()
        >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
        Predicted class: tabby, tabby cat
        ```"""
        if inputs is not None and pixel_values is not None:
            raise ValueError("You cannot use both `inputs` and `pixel_values`")
        elif inputs is None and pixel_values is not None:
            inputs = pixel_values
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.perceiver(
            inputs=inputs,
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = outputs.logits if return_dict else outputs[0]

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return PerceiverClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )


@add_start_docstrings(
    """
Example use of Perceiver for image classification, for tasks such as ImageNet.

This model uses fixed 2D Fourier position embeddings. As shown in the paper, this model can achieve a top-1 accuracy of
79.0 on ImageNet, and 84.5 when pre-trained on a large-scale dataset (i.e. JFT).

[`PerceiverForImageClassificationFourier`] uses [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`]
(with `prep_type="pixels"`) to preprocess the input images, and
[`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation of
[`PerceiverModel`] into classification logits.
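Note that the Fourier position encodings are applied to the raw pixels, parameterized below with 64 frequency bands at
a maximum resolution of 224 x 224.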
""", PERCEIVER_START_DOCSTRING, ) class PerceiverForImageClassificationFourier(PerceiverPreTrainedModel): def __init__(self, config): super().__init__(config) fourier_position_encoding_kwargs_preprocessor = { "concat_pos": True, "max_resolution": (224, 224), "num_bands": 64, "sine_only": False, } trainable_position_encoding_kwargs_decoder = {"num_channels": config.d_latents, "index_dims": 1} self.num_labels = config.num_labels self.perceiver = PerceiverModel( config, input_preprocessor=PerceiverImagePreprocessor( config, prep_type="pixels", spatial_downsample=1, fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor, ), decoder=PerceiverClassificationDecoder( config, num_channels=config.d_latents, trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder, use_query_residual=True, ), ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, inputs: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, pixel_values: Optional[torch.Tensor] = None, ) -> Union[Tuple, PerceiverClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, PerceiverForImageClassificationFourier
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("deepmind/vision-perceiver-fourier")
        >>> model = PerceiverForImageClassificationFourier.from_pretrained("deepmind/vision-perceiver-fourier")

        >>> inputs = image_processor(images=image, return_tensors="pt").pixel_values
        >>> outputs = model(inputs=inputs)
        >>> logits = outputs.logits
        >>> list(logits.shape)
        [1, 1000]

        >>> # model predicts one of the 1000 ImageNet classes
        >>> predicted_class_idx = logits.argmax(-1).item()
        >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
        Predicted class: tabby, tabby cat
        ```"""
        if inputs is not None and pixel_values is not None:
            raise ValueError("You cannot use both `inputs` and `pixel_values`")
        elif inputs is None and pixel_values is not None:
            inputs = pixel_values
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.perceiver(
            inputs=inputs,
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = outputs.logits if return_dict else outputs[0]

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return PerceiverClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )


@add_start_docstrings(
    """
Example use of Perceiver for image classification, for tasks such as ImageNet.

This model uses a 2D conv+maxpool preprocessing network. As shown in the paper, this model can achieve a top-1 accuracy
of 82.1 on ImageNet.

[`PerceiverForImageClassificationConvProcessing`] uses
[`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`] (with `prep_type="conv"`) to preprocess the input
images, and [`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation
of [`PerceiverModel`] into classification logits.
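Note that the conv+maxpool network spatially downsamples the images before the Perceiver encoder is applied, which is
why the Fourier position encodings below use a maximum resolution of 56 x 56.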
""", PERCEIVER_START_DOCSTRING, ) class PerceiverForImageClassificationConvProcessing(PerceiverPreTrainedModel): def __init__(self, config): super().__init__(config) fourier_position_encoding_kwargs_preprocessor = { "concat_pos": True, "max_resolution": (56, 56), "num_bands": 64, "sine_only": False, } trainable_position_encoding_kwargs_decoder = {"num_channels": config.d_latents, "index_dims": 1} self.num_labels = config.num_labels self.perceiver = PerceiverModel( config, input_preprocessor=PerceiverImagePreprocessor( config, prep_type="conv", spatial_downsample=1, position_encoding_type="fourier", fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor, ), decoder=PerceiverClassificationDecoder( config, num_channels=config.d_latents, trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder, use_query_residual=True, ), ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, inputs: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, pixel_values: Optional[torch.Tensor] = None, ) -> Union[Tuple, PerceiverClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, PerceiverForImageClassificationConvProcessing
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("deepmind/vision-perceiver-conv")
        >>> model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/vision-perceiver-conv")

        >>> inputs = image_processor(images=image, return_tensors="pt").pixel_values
        >>> outputs = model(inputs=inputs)
        >>> logits = outputs.logits
        >>> list(logits.shape)
        [1, 1000]

        >>> # model predicts one of the 1000 ImageNet classes
        >>> predicted_class_idx = logits.argmax(-1).item()
        >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
        Predicted class: tabby, tabby cat
        ```"""
        if inputs is not None and pixel_values is not None:
            raise ValueError("You cannot use both `inputs` and `pixel_values`")
        elif inputs is None and pixel_values is not None:
            inputs = pixel_values
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.perceiver(
            inputs=inputs,
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = outputs.logits if return_dict else outputs[0]

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return PerceiverClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )


@add_start_docstrings(
    """
Example use of Perceiver for optical flow, for tasks such as Sintel and KITTI. [`PerceiverForOpticalFlow`] uses
[`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`] (with *prep_type="patches"*) to preprocess the
input images, and [`~models.perceiver.modeling_perceiver.PerceiverOpticalFlowDecoder`] to decode the latent
representation of [`PerceiverModel`].

As input, one concatenates 2 subsequent frames along the channel dimension and extracts a 3 x 3 patch around each pixel
(leading to 3 x 3 x 3 x 2 = 54 values for each pixel). Fixed Fourier position encodings are used to encode the position
of each pixel in the patch. Next, one applies the Perceiver encoder. To decode, one queries the latent representation
using the same encoding used for the input.
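Note that `config.train_size` determines the maximum resolution of the fixed Fourier position encodings used by both
the preprocessor and the decoder.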
""", PERCEIVER_START_DOCSTRING, ) class PerceiverForOpticalFlow(PerceiverPreTrainedModel): def __init__(self, config): super().__init__(config) fourier_position_encoding_kwargs_preprocessor = { "num_bands": 64, "max_resolution": config.train_size, "sine_only": False, "concat_pos": True, } fourier_position_encoding_kwargs_decoder = { "concat_pos": True, "max_resolution": config.train_size, "num_bands": 64, "sine_only": False, } image_preprocessor = PerceiverImagePreprocessor( config, prep_type="patches", spatial_downsample=1, conv_after_patching=True, conv_after_patching_in_channels=54, temporal_downsample=2, position_encoding_type="fourier", # position_encoding_kwargs fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor, ) self.perceiver = PerceiverModel( config, input_preprocessor=image_preprocessor, decoder=PerceiverOpticalFlowDecoder( config, num_channels=image_preprocessor.num_channels, output_image_shape=config.train_size, rescale_factor=100.0, # decoder kwargs use_query_residual=False, output_num_channels=2, # We query the decoder using the first frame features # rather than a standard decoder position encoding. position_encoding_type="fourier", fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_decoder, ), ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, inputs: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, PerceiverClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the optical flow loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
        Returns:

        Examples:

        ```python
        >>> from transformers import PerceiverForOpticalFlow
        >>> import torch

        >>> model = PerceiverForOpticalFlow.from_pretrained("deepmind/optical-flow-perceiver")

        >>> # in the Perceiver IO paper, the authors extract a 3 x 3 patch around each pixel,
        >>> # leading to 3 x 3 x 3 = 27 values for each pixel (as each pixel also has 3 color channels)
        >>> # patches have shape (batch_size, num_frames, num_channels, height, width)
        >>> # the authors train on resolutions of 368 x 496
        >>> patches = torch.randn(1, 2, 27, 368, 496)
        >>> outputs = model(inputs=patches)
        >>> logits = outputs.logits
        >>> list(logits.shape)
        [1, 368, 496, 2]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.perceiver(
            inputs=inputs,
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = outputs.logits if return_dict else outputs[0]

        loss = None
        if labels is not None:
            raise NotImplementedError("Optical flow training is not yet supported")

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return PerceiverClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )


@add_start_docstrings(
    """
Example use of Perceiver for multimodal (video) autoencoding, for tasks such as Kinetics-700.

[`PerceiverForMultimodalAutoencoding`] uses [`~models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor`] to
preprocess the 3 modalities: images, audio and class labels. This preprocessor uses modality-specific preprocessors to
preprocess every modality separately, after which they are concatenated. Trainable position embeddings are used to pad
each modality to the same number of channels to make concatenation along the time dimension possible. Next, one applies
the Perceiver encoder.

[`~models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder`] is used to decode the latent representation of
[`PerceiverModel`]. This decoder uses each modality-specific decoder to construct queries. The decoder queries are
created based on the inputs after preprocessing. However, autoencoding an entire video in a single forward pass is
computationally infeasible, hence one only uses parts of the decoder queries to do cross-attention with the latent
representation. This is determined by the subsampled indices for each modality, which can be provided as additional
input to the forward pass of [`PerceiverForMultimodalAutoencoding`].

[`~models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder`] also pads the decoder queries of the different
modalities to the same number of channels, in order to concatenate them along the time dimension. Next, cross-attention
is performed with the latent representation of [`PerceiverModel`].

Finally, [`~models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor`] is used to turn this tensor into an
actual video. It first splits up the output into the different modalities, and then applies the respective
postprocessor for each modality.

Note that, by masking the classification label during evaluation (i.e. simply providing a tensor of zeros for the
"label" modality), this auto-encoding model becomes a Kinetics 700 video classifier.
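See the code example in the `forward` method below for an illustration of chunk-based decoding with
`subsampled_output_points`.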
""", PERCEIVER_START_DOCSTRING, ) class PerceiverForMultimodalAutoencoding(PerceiverPreTrainedModel): def __init__(self, config: PerceiverConfig): super().__init__(config) n_audio_samples = config.num_frames * config.audio_samples_per_frame input_preprocessor = PerceiverMultimodalPreprocessor( min_padding_size=4, modalities={ "audio": PerceiverAudioPreprocessor( config, position_encoding_type="fourier", fourier_position_encoding_kwargs={ "num_bands": 192, "max_resolution": (n_audio_samples,), "sine_only": False, "concat_pos": True, }, prep_type="patches", samples_per_patch=config.samples_per_patch, ), "image": PerceiverImagePreprocessor( config, position_encoding_type="fourier", fourier_position_encoding_kwargs={ "num_bands": 32, "max_resolution": (config.num_frames, config.image_size, config.image_size), "sine_only": False, "concat_pos": True, }, prep_type="patches", spatial_downsample=4, temporal_downsample=1, ), "label": PerceiverOneHotPreprocessor(config), }, mask_probs={"image": 0.0, "audio": 0.0, "label": 1.0}, ) image_decoder = PerceiverBasicVideoAutoencodingDecoder( config, # Autoencoding, don't pass inputs to the queries. concat_preprocessed_input=False, output_shape=config.output_shape, output_num_channels=config.output_num_channels, use_query_residual=False, position_encoding_only=True, position_encoding_type="fourier", fourier_position_encoding_kwargs={ "num_bands": 32, "max_resolution": (config.num_frames, config.image_size, config.image_size), "sine_only": False, "concat_pos": True, }, ) decoder = PerceiverMultimodalDecoder( config, # Autoencoding, don't pass inputs to the queries. concat_preprocessed_input=False, # Modality specific decoders are used ONLY to generate queries. # All modalties are decoded together using a unified decoder. modalities={ "audio": PerceiverBasicDecoder( config, # Autoencoding, don't pass inputs to the queries. concat_preprocessed_input=False, output_index_dims=(n_audio_samples // config.samples_per_patch,), output_num_channels=config.output_num_channels, use_query_residual=False, position_encoding_only=True, position_encoding_type="fourier", fourier_position_encoding_kwargs={ "num_bands": 192, "max_resolution": (n_audio_samples,), "sine_only": False, "concat_pos": True, }, ), "image": image_decoder, "label": PerceiverClassificationDecoder( config, # Autoencoding, don't pass inputs to the queries. 
concat_preprocessed_input=False, use_query_residual=False, position_encoding_only=True, position_encoding_type="trainable", trainable_position_encoding_kwargs={ "num_channels": config._label_trainable_num_channels, "index_dims": 1, }, ), }, num_outputs=None, output_num_channels=config.output_num_channels, use_query_residual=False, ) output_postprocessor = PerceiverMultimodalPostprocessor( modalities={ "audio": PerceiverAudioPostprocessor(config, in_channels=config.output_num_channels), "image": PerceiverProjectionPostprocessor(in_channels=config.output_num_channels, out_channels=3), "label": PerceiverClassificationPostprocessor(config, in_channels=config.output_num_channels), } ) self.perceiver = PerceiverModel( config, input_preprocessor=input_preprocessor, decoder=decoder, output_postprocessor=output_postprocessor, ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, inputs: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, subsampled_output_points: Optional[Dict[str, torch.Tensor]] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, PerceiverClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: Examples: ```python >>> from transformers import PerceiverForMultimodalAutoencoding >>> import torch >>> import numpy as np >>> # create multimodal inputs >>> images = torch.randn((1, 16, 3, 224, 224)) >>> audio = torch.randn((1, 30720, 1)) >>> inputs = dict(image=images, audio=audio, label=torch.zeros((images.shape[0], 700))) >>> model = PerceiverForMultimodalAutoencoding.from_pretrained("deepmind/multimodal-perceiver") >>> # in the Perceiver IO paper, videos are auto-encoded in chunks >>> # each chunk subsamples different index dimensions of the image and audio modality decoder queries >>> nchunks = 128 >>> image_chunk_size = np.prod((16, 224, 224)) // nchunks >>> audio_chunk_size = audio.shape[1] // model.config.samples_per_patch // nchunks >>> # process the first chunk >>> chunk_idx = 0 >>> subsampling = { ... "image": torch.arange(image_chunk_size * chunk_idx, image_chunk_size * (chunk_idx + 1)), ... "audio": torch.arange(audio_chunk_size * chunk_idx, audio_chunk_size * (chunk_idx + 1)), ... "label": None, ... 
} >>> outputs = model(inputs=inputs, subsampled_output_points=subsampling) >>> logits = outputs.logits >>> list(logits["audio"].shape) [1, 240] >>> list(logits["image"].shape) [1, 6272, 3] >>> list(logits["label"].shape) [1, 700] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.perceiver( inputs=inputs, attention_mask=attention_mask, subsampled_output_points=subsampled_output_points, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = outputs.logits if return_dict else outputs[0] loss = None if labels is not None: raise NotImplementedError("Multimodal autoencoding training is not yet supported") if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return PerceiverClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) # Below: position encodings def build_position_encoding( position_encoding_type, out_channels=None, project_pos_dim=-1, trainable_position_encoding_kwargs=None, fourier_position_encoding_kwargs=None, ): """ Builds the position encoding. Args: - out_channels: refers to the number of channels of the position encodings. - project_pos_dim: if specified, will project the position encodings to this dimension. """ if position_encoding_type == "trainable": if not trainable_position_encoding_kwargs: raise ValueError("Make sure to pass trainable_position_encoding_kwargs") output_pos_enc = PerceiverTrainablePositionEncoding(**trainable_position_encoding_kwargs) elif position_encoding_type == "fourier": # We don't use the index_dims argument, as this is only known during the forward pass if not fourier_position_encoding_kwargs: raise ValueError("Make sure to pass fourier_position_encoding_kwargs") output_pos_enc = PerceiverFourierPositionEncoding(**fourier_position_encoding_kwargs) else: raise ValueError(f"Unknown position encoding type: {position_encoding_type}.") # Optionally, project the position encoding to a target dimension: positions_projection = nn.Linear(out_channels, project_pos_dim) if project_pos_dim > 0 else nn.Identity() return output_pos_enc, positions_projection # Below: Perceiver decoders class PerceiverAbstractDecoder(nn.Module, metaclass=abc.ABCMeta): """Perceiver abstract decoder.""" @abc.abstractmethod def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None): raise NotImplementedError @property @abc.abstractmethod def num_query_channels(self): raise NotImplementedError @abc.abstractmethod def forward(self, query, z, query_mask=None): raise NotImplementedError class PerceiverProjectionDecoder(PerceiverAbstractDecoder): """ Baseline projection decoder (no cross-attention). Args: config ([`PerceiverConfig`]): Model configuration. 
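
    Example (an illustrative sketch; shapes assume the default config):

    ```python
    >>> import torch

    >>> config = PerceiverConfig(num_labels=10)
    >>> decoder = PerceiverProjectionDecoder(config)
    >>> z = torch.randn(2, config.num_latents, config.d_latents)
    >>> decoder(query=None, z=z).shape
    torch.Size([2, 10])
    ```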
""" def __init__(self, config): super().__init__() self.classifier = nn.Linear(config.d_latents, config.num_labels) def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None): return None def forward( self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor] = None ) -> torch.FloatTensor: # (batch_size, num_latents, d_latents) -> (batch_size, d_latents) z = torch.mean(z, dim=1) # (batch_size, d_latents) -> (batch_size, config.num_labels) logits = self.classifier(z) return logits class PerceiverBasicDecoder(PerceiverAbstractDecoder): """ Cross-attention-based decoder. This class can be used to decode the final hidden states of the latents using a cross-attention operation, in which the latents produce keys and values. The shape of the output of this class depends on how one defines the output queries (also called decoder queries). Args: config ([*PerceiverConfig*]): Model configuration. output_num_channels (`int`, *optional*): The number of channels in the output. Will only be used in case *final_project* is set to `True`. position_encoding_type (`str`, *optional*, defaults to "trainable"): The type of position encoding to use. Can be either "trainable", "fourier", or "none". output_index_dims (`int`, *optional*): The number of dimensions of the output queries. Ignored if 'position_encoding_type' == 'none'. num_channels (`int`, *optional*, defaults to 128): The number of channels of the decoder queries. Ignored if 'position_encoding_type' == 'none'. qk_channels (`int`, *optional*): The number of channels of the queries and keys in the cross-attention layer. v_channels (`int`, *optional*): The number of channels of the values in the cross-attention layer. num_heads (`int`, *optional*, defaults to 1): The number of attention heads in the cross-attention layer. widening_factor (`int`, *optional*, defaults to 1): The widening factor of the cross-attention layer. use_query_residual (`bool`, *optional*, defaults to `False`): Whether to use a residual connection between the query and the output of the cross-attention layer. concat_preprocessed_input (`bool`, *optional*, defaults to `False`): Whether to concatenate the preprocessed input to the query. final_project (`bool`, *optional*, defaults to `True`): Whether to project the output of the cross-attention layer to a target dimension. position_encoding_only (`bool`, *optional*, defaults to `False`): Whether to only use this class to define output queries. """ def __init__( self, config: PerceiverConfig, output_num_channels: int, position_encoding_type: Optional[str] = "trainable", # The following 2 arguments are ignored if position_encoding_type == 'none': output_index_dims: Optional[int] = None, num_channels: Optional[int] = 128, subsampled_index_dims: Optional[int] = None, qk_channels: Optional[int] = None, v_channels: Optional[int] = None, num_heads: Optional[int] = 1, widening_factor: Optional[int] = 1, use_query_residual: Optional[bool] = False, concat_preprocessed_input: Optional[bool] = False, final_project: Optional[bool] = True, position_encoding_only: Optional[bool] = False, **position_encoding_kwargs, ) -> None: super().__init__() self.output_num_channels = output_num_channels # If `none`, the decoder will not construct any position encodings. # You should construct your own when querying the decoder. 
self.output_position_encodings = None self.position_encoding_type = position_encoding_type self.position_encoding_kwargs = position_encoding_kwargs if position_encoding_type != "none": self.output_position_encodings, self.positions_projection = build_position_encoding( position_encoding_type=position_encoding_type, **position_encoding_kwargs ) self.output_index_dims = output_index_dims self.num_channels = num_channels if subsampled_index_dims is None: subsampled_index_dims = output_index_dims self.subsampled_index_dims = subsampled_index_dims self.concat_preprocessed_input = concat_preprocessed_input self.final_project = final_project self.position_encoding_only = position_encoding_only # for multimodal autoencoding, we don't need the decoder cross-attention and final layer # so then we will set position_encoding_only to True if not self.position_encoding_only: self.decoding_cross_attention = PerceiverLayer( config, is_cross_attention=True, qk_channels=qk_channels, v_channels=v_channels, num_heads=num_heads, q_dim=num_channels, kv_dim=config.d_latents, widening_factor=widening_factor, use_query_residual=use_query_residual, ) self.final_layer = nn.Linear(num_channels, output_num_channels) if final_project else nn.Identity() @property def num_query_channels(self) -> int: if self.position_encoding_type == "none": # Queries come from elsewhere raise ValueError( "You cannot calculate number of decoder query channels when position_encoding_type is set to none" ) if self.position_encoding_only: if "project_pos_dim" in self.position_encoding_kwargs: return self.position_encoding_kwargs["project_pos_dim"] return self.output_position_encodings.output_size() if self.final_project: return self.output_num_channels return self.num_channels def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None): if self.position_encoding_type == "none": # Queries come from elsewhere raise ValueError("You cannot construct decoder queries when position_encoding_type is set to none") if subsampled_points is not None: # subsampled_points are the indices if the inputs would be flattened # however, the inputs aren't flattened, that's why we use unravel_index # to get the indices for the unflattened array # unravel_index returns a tuple (x_idx, y_idx, ...) # stack to get the [n, d] tensor of coordinates indices = [torch.from_numpy(x) for x in np.unravel_index(subsampled_points.cpu(), self.output_index_dims)] pos = torch.stack(indices, dim=1) batch_size = inputs.shape[0] # Map these coordinates to [-1, 1] pos = -1 + 2 * pos / torch.tensor(self.output_index_dims)[None, :] pos = torch.broadcast_to(pos[None], [batch_size, pos.shape[0], pos.shape[1]]) # Construct the position encoding. if self.position_encoding_type == "trainable": pos_emb = self.output_position_encodings(batch_size) elif self.position_encoding_type == "fourier": pos_emb = self.output_position_encodings( self.output_index_dims, batch_size=batch_size, device=inputs.device, dtype=inputs.dtype, pos=pos ) # Optionally project them to a target dimension. pos_emb = self.positions_projection(pos_emb) pos_emb = torch.reshape(pos_emb, [pos_emb.shape[0], -1, pos_emb.shape[-1]]) else: batch_size = inputs.shape[0] index_dims = inputs.shape[2:] # Construct the position encoding. 
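            # One query is constructed per output element; the decoding cross-attention reads
            # from the latents using these purely position-based queries.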
if self.position_encoding_type == "trainable": pos_emb = self.output_position_encodings(batch_size) elif self.position_encoding_type == "fourier": pos_emb = self.output_position_encodings( index_dims, batch_size, device=inputs.device, dtype=inputs.dtype ) # Optionally project them to a target dimension. pos_emb = self.positions_projection(pos_emb) if self.concat_preprocessed_input: if inputs_without_pos is None: raise ValueError("Value is required for inputs_without_pos if concat_preprocessed_input is True") pos_emb = torch.cat([inputs_without_pos, pos_emb], dim=-1) return pos_emb def forward( self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> PerceiverDecoderOutput: # Cross-attention decoding. # key, value: B x N x K; query: B x M x K # Attention maps -> B x N x M # Output -> B x M x K cross_attentions = () if output_attentions else None layer_outputs = self.decoding_cross_attention( query, attention_mask=query_mask, head_mask=None, inputs=z, inputs_mask=None, output_attentions=output_attentions, ) output = layer_outputs[0] if output_attentions: cross_attentions = cross_attentions + (layer_outputs[1],) logits = self.final_layer(output) return PerceiverDecoderOutput(logits=logits, cross_attentions=cross_attentions) class PerceiverClassificationDecoder(PerceiverAbstractDecoder): """ Cross-attention based classification decoder. Light-weight wrapper of [`PerceiverBasicDecoder`] for logit output. Will turn the output of the Perceiver encoder which is of shape (batch_size, num_latents, d_latents) to a tensor of shape (batch_size, num_labels). The queries are of shape (batch_size, 1, num_labels). Args: config ([`PerceiverConfig`]): Model configuration. """ def __init__(self, config, **decoder_kwargs): super().__init__() self.num_labels = config.num_labels self.decoder = PerceiverBasicDecoder( config, output_num_channels=self.num_labels, output_index_dims=1, # Predict a single logit array. 
**decoder_kwargs, ) @property def num_query_channels(self) -> int: return self.decoder.num_query_channels def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None): return self.decoder.decoder_query( inputs, modality_sizes, inputs_without_pos, subsampled_points=subsampled_points ) def forward( self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> PerceiverDecoderOutput: decoder_outputs = self.decoder(query, z, output_attentions=output_attentions) # B x 1 x num_classes -> B x num_classes logits = decoder_outputs.logits[:, 0, :] return PerceiverDecoderOutput(logits=logits, cross_attentions=decoder_outputs.cross_attentions) class PerceiverOpticalFlowDecoder(PerceiverAbstractDecoder): """Cross-attention based optical flow decoder.""" def __init__(self, config, output_image_shape, output_num_channels=2, rescale_factor=100.0, **decoder_kwargs): super().__init__() self.output_image_shape = output_image_shape self.output_num_channels = output_num_channels self.rescale_factor = rescale_factor self.decoder = PerceiverBasicDecoder(config, output_num_channels=output_num_channels, **decoder_kwargs) @property def num_query_channels(self) -> int: return self.decoder.num_query_channels def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None): if subsampled_points is not None: raise ValueError("FlowDecoder doesn't support subsampling yet.") return inputs def forward( self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> PerceiverDecoderOutput: decoder_outputs = self.decoder(query, z, output_attentions=output_attentions) preds = decoder_outputs.logits # Output flow and rescale. preds /= self.rescale_factor preds = preds.reshape([preds.shape[0]] + list(self.output_image_shape) + [preds.shape[-1]]) return PerceiverDecoderOutput(logits=preds, cross_attentions=decoder_outputs.cross_attentions) class PerceiverBasicVideoAutoencodingDecoder(PerceiverAbstractDecoder): """ Cross-attention based video-autoencoding decoder. Light-weight wrapper of [*PerceiverBasicDecoder*] with video reshaping logic. Args: config ([*PerceiverConfig*]): Model configuration. output_shape (`List[int]`): Shape of the output as (batch_size, num_frames, height, width), excluding the channel dimension. position_encoding_type (`str`): The type of position encoding to use. Can be either "trainable", "fourier", or "none". 
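
    Illustrative shape note (example values only): with `output_shape=[1, 16, 224, 224]`, the decoder's flat logits
    of shape `(batch_size, 16 * 224 * 224, num_channels)` are reshaped in `forward` to
    `(1, 16, 224, 224, num_channels)`.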
""" def __init__( self, config: PerceiverConfig, output_shape: List[int], position_encoding_type: str, **decoder_kwargs ) -> None: super().__init__() if len(output_shape) != 4: # B, T, H, W raise ValueError(f"Expected rank 4 output_shape, got {output_shape}.") # Build the decoder components: self.output_shape = output_shape self.output_num_channels = decoder_kwargs["output_num_channels"] self.decoder = PerceiverBasicDecoder( config, output_index_dims=self.output_shape[1:4], # T*H*W position_encoding_type=position_encoding_type, **decoder_kwargs, ) @property def num_query_channels(self) -> int: return self.decoder.num_query_channels def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None): return self.decoder.decoder_query( inputs, modality_sizes=modality_sizes, inputs_without_pos=inputs_without_pos, subsampled_points=subsampled_points, ) def forward( self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor] = None ) -> PerceiverDecoderOutput: decoder_outputs = self.decoder(query, z) logits = decoder_outputs.logits logits = torch.reshape(logits, self.output_shape + [logits.shape[-1]]) return PerceiverDecoderOutput(logits=logits, cross_attentions=decoder_outputs.cross_attentions) def restructure(modality_sizes: ModalitySizeType, inputs: torch.Tensor) -> Mapping[str, torch.Tensor]: """ Partitions a [B, N, C] tensor into tensors for each modality. Args: modality_sizes dict specifying the size of the modality inputs: input tensor Returns: dict mapping name of modality to its associated tensor. """ outputs = {} index = 0 # Apply a predictable ordering to the modalities for modality in sorted(modality_sizes.keys()): size = modality_sizes[modality] inp = inputs[:, index : index + size] index += size outputs[modality] = inp return outputs class PerceiverMultimodalDecoder(PerceiverAbstractDecoder): """ Multimodal decoding by composing uni-modal decoders. The *modalities* argument of the constructor is a dictionary mapping modality name to the decoder of that modality. That decoder will be used to construct queries for that modality. Modality-specific queries are padded with trainable modality-specific parameters, after which they are concatenated along the time dimension. Next, there is a shared cross attention operation across all modalities. Args: config ([*PerceiverConfig*]): Model configuration. modalities (`Dict[str, PerceiverAbstractDecoder]`): Dictionary mapping modality name to the decoder of that modality. num_outputs (`int`): The number of outputs of the decoder. output_num_channels (`int`): The number of channels in the output. min_padding_size (`int`, *optional*, defaults to 2): The minimum padding size for all modalities. The final output will have num_channels equal to the maximum channels across all modalities plus min_padding_size. subsampled_index_dims (`Dict[str, PerceiverAbstractDecoder]`, *optional*): Dictionary mapping modality name to the subsampled index dimensions to use for the decoder query of that modality. 
""" def __init__( self, config: PerceiverConfig, modalities: Dict[str, PerceiverAbstractDecoder], num_outputs: int, output_num_channels: int, min_padding_size: Optional[int] = 2, subsampled_index_dims: Optional[Dict[str, PerceiverAbstractDecoder]] = None, **decoder_kwargs, ) -> None: super().__init__() self.modalities = nn.ModuleDict(modalities) self.subsampled_index_dims = subsampled_index_dims self.min_padding_size = min_padding_size self.output_num_channels = output_num_channels self.num_outputs = num_outputs self.decoder = PerceiverBasicDecoder( config, output_index_dims=(num_outputs,), output_num_channels=output_num_channels, position_encoding_type="none", num_channels=self.num_query_channels, **decoder_kwargs, ) self.padding = nn.ParameterDict( { modality: nn.Parameter(torch.randn(1, self.num_query_channels - decoder.num_query_channels)) for modality, decoder in modalities.items() } ) @property def num_query_channels(self) -> int: max_channel_size = max(decoder.num_query_channels for _, decoder in self.modalities.items()) common_channel_size = max_channel_size + self.min_padding_size return common_channel_size def decoder_query(self, inputs, modality_sizes, inputs_without_pos=None, subsampled_points=None): # Partition the flat inputs among the different modalities inputs = restructure(modality_sizes, inputs) # Obtain modality-specific decoders' queries subsampled_points = subsampled_points or {} decoder_queries = {} for modality, decoder in self.modalities.items(): # Get input_without_pos for this modality if it exists. input_without_pos = None if inputs_without_pos is not None: input_without_pos = inputs_without_pos.get(modality, None) query = decoder.decoder_query( inputs=inputs[modality], modality_sizes=None, inputs_without_pos=input_without_pos, subsampled_points=subsampled_points.get(modality, None), ) decoder_queries[modality] = query # Pad all queries with trainable position encodings to make them have the same channels def embed(modality, x): x = torch.reshape(x, [x.shape[0], np.prod(x.shape[1:-1]), x.shape[-1]]) pos = self.padding[modality] pos = torch.broadcast_to(pos, [x.shape[0], x.shape[1], self.num_query_channels - x.shape[2]]) return torch.cat([x, pos], dim=2) # Apply a predictable ordering to the modalities return torch.cat( [embed(modality, decoder_queries[modality]) for modality in sorted(self.modalities.keys())], dim=1 ) def forward( self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> torch.Tensor: # B x 1 x num_classes -> B x num_classes decoder_outputs = self.decoder(query, z, output_attentions=output_attentions) return decoder_outputs # Below: IO pre- and post-processor classes for Perceiver. def space_to_depth(frames: torch.Tensor, temporal_block_size: int = 1, spatial_block_size: int = 1) -> torch.Tensor: """ Space to depth transform. Rearranges blocks of spatial data, into depth. This function assumes the channels to be first, but will place the channels last after transformation. Based on https://discuss.pytorch.org/t/is-there-any-layer-like-tensorflows-space-to-depth-function/3487/15. 
""" if len(frames.shape) == 4: batch_size, num_channels, height, width = frames.shape # split up dimensions (height by spatial_block_size, width by spatial_block_size) frames = frames.view( batch_size, num_channels, height // spatial_block_size, spatial_block_size, width // spatial_block_size, spatial_block_size, ) # move blocks to last dimension: (batch_size, H//bs, W//bs, bs, bs, C) frames = frames.permute(0, 2, 4, 3, 5, 1).contiguous() # concatenate blocks along channel dimension: (batch_size, H//bs, W//bs, bs*bs*C) frames = frames.view( batch_size, height // spatial_block_size, width // spatial_block_size, (spatial_block_size**2) * num_channels, ) return frames elif len(frames.shape) == 5: batch_size, time, num_channels, height, width = frames.shape # split up dimensions (time by temporal_block_size, height by spatial_block_size, width by spatial_block_size) frames = frames.view( batch_size, time // temporal_block_size, temporal_block_size, num_channels, height // spatial_block_size, spatial_block_size, width // spatial_block_size, spatial_block_size, ) # move blocks to last dimension: (batch_size, T//ts, H//bs, W//bs, ts, bs, bs, C) frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous() # concatenate blocks along channel dimension: (batch_size, T//ts, H//bs, W//bs, ts*bs*bs*C) frames = frames.view( batch_size, time // temporal_block_size, height // spatial_block_size, width // spatial_block_size, temporal_block_size * (spatial_block_size**2) * num_channels, ) return frames else: raise ValueError( "Frames should be of rank 4 (batch, channels, height, width)" " or rank 5 (batch, time, channels, height, width)" ) class Conv2dSamePadding(nn.Conv2d): """ Conv2d layer with padding="same" support. Source: https://gist.github.com/sumanmichael/4de9dee93f972d47c80c4ade8e149ea6 """ def __init__(self, *args, **kwargs): super(Conv2dSamePadding, self).__init__(*args, **kwargs) self.zero_pad_2d = nn.ZeroPad2d( reduce(__add__, [(k // 2 + (k - 2 * (k // 2)) - 1, k // 2) for k in self.kernel_size[::-1]]) ) def forward(self, input): return self._conv_forward(self.zero_pad_2d(input), self.weight, self.bias) class Conv2DDownsample(nn.Module): """Downsamples 4x by applying a 2D convolution and doing max pooling.""" def __init__( self, num_layers: int = 1, in_channels: int = 3, out_channels: int = 64, use_batchnorm: bool = True, ): """ Constructs a Conv2DDownsample model. Args: in_channels (`int`, *optional*, defaults to 3): The number of input channels. out_channels (`int`, *optional*, defaults to 64): The number of conv output channels. use_batchnorm (`bool`, *optional*, defaults to `True`): Whether to use batchnorm. """ super().__init__() self.conv = Conv2dSamePadding( in_channels=in_channels, out_channels=out_channels, kernel_size=7, stride=2, bias=False ) self.batchnorm = nn.BatchNorm2d(num_features=out_channels) if use_batchnorm else nn.Identity() self.relu = nn.ReLU() self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2) def forward(self, inputs: torch.Tensor) -> torch.Tensor: out = self.conv(inputs) out = self.batchnorm(out) out = self.relu(out) out = self.max_pool(out) return out def generate_fourier_features(pos, num_bands, max_resolution=(224, 224), concat_pos=True, sine_only=False): """ Generate a Fourier frequency position encoding with linear spacing. Args: pos (`torch.LongTensor` of shape `(batch_size, sequence_length, dim)`): The Tensor containing the position of n points in d dimensional space. num_bands (`int`): The number of frequency bands (K) to use. 
max_resolution (`Tuple[int]`, *optional*, defaults to (224, 224)): The maximum resolution (i.e. the number of pixels per dim). A tuple representing resolution for each dimension. concat_pos (`bool`, *optional*, defaults to `True`): Whether to concatenate the input position encoding to the Fourier features. sine_only (`bool`, *optional*, defaults to `False`): Whether to use a single phase (sin) or two (sin/cos) for each frequency band. Returns: `torch.FloatTensor` of shape `(batch_size, sequence_length, n_channels)`: The Fourier position embeddings. If `concat_pos` is `True` and `sine_only` is `False`, output dimensions are ordered as: [dim_1, dim_2, ..., dim_d, sin(pi*f_1*dim_1), ..., sin(pi*f_K*dim_1), ..., sin(pi*f_1*dim_d), ..., sin(pi*f_K*dim_d), cos(pi*f_1*dim_1), ..., cos(pi*f_K*dim_1), ..., cos(pi*f_1*dim_d), ..., cos(pi*f_K*dim_d)], where dim_i is pos[:, i] and f_k is the kth frequency band. """ batch_size = pos.shape[0] min_freq = 1.0 # Nyquist frequency at the target resolution: freq_bands = torch.stack( [torch.linspace(start=min_freq, end=res / 2, steps=num_bands) for res in max_resolution], dim=0 ) # Get frequency bands for each spatial dimension. # Output is size [n, d * num_bands] per_pos_features = pos[0, :, :][:, :, None] * freq_bands[None, :, :] per_pos_features = torch.reshape(per_pos_features, [-1, np.prod(per_pos_features.shape[1:])]) if sine_only: # Output is size [n, d * num_bands] per_pos_features = torch.sin(np.pi * (per_pos_features)) else: # Output is size [n, 2 * d * num_bands] per_pos_features = torch.cat( [torch.sin(np.pi * per_pos_features), torch.cos(np.pi * per_pos_features)], dim=-1 ) # Concatenate the raw input positions. if concat_pos: # Adds d bands to the encoding. per_pos_features = torch.cat([pos, per_pos_features.expand(batch_size, -1, -1)], dim=-1) return per_pos_features def build_linear_positions(index_dims, output_range=(-1.0, 1.0)): """ Generate an array of position indices for an N-D input array. Args: index_dims (`List[int]`): The shape of the index dimensions of the input array. output_range (`Tuple[float]`, *optional*, defaults to `(-1.0, 1.0)`): The min and max values taken by each input index dimension. Returns: `torch.FloatTensor` of shape `(index_dims[0], index_dims[1], .., index_dims[-1], N)`. 
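
    Example (illustrative):

    ```python
    >>> pos = build_linear_positions([2, 3])
    >>> pos.shape
    torch.Size([2, 3, 2])
    >>> pos[0, 0], pos[-1, -1]
    (tensor([-1., -1.]), tensor([1., 1.]))
    ```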
""" def _linspace(n_xels_per_dim): return torch.linspace(start=output_range[0], end=output_range[1], steps=n_xels_per_dim, dtype=torch.float32) dim_ranges = [_linspace(n_xels_per_dim) for n_xels_per_dim in index_dims] array_index_grid = meshgrid(*dim_ranges, indexing="ij") return torch.stack(array_index_grid, dim=-1) class PerceiverAbstractPositionEncoding(nn.Module, metaclass=abc.ABCMeta): """Perceiver abstract position encoding.""" @property @abc.abstractmethod def num_dimensions(self) -> int: raise NotImplementedError @abc.abstractmethod def output_size(self, *args, **kwargs) -> int: raise NotImplementedError @abc.abstractmethod def forward(self, batch_size, pos): raise NotImplementedError class PerceiverTrainablePositionEncoding(PerceiverAbstractPositionEncoding): """Trainable position encoding.""" def __init__(self, index_dims, num_channels=128): super().__init__() self._num_channels = num_channels self._index_dims = index_dims index_dim = np.prod(index_dims) self.position_embeddings = nn.Parameter(torch.randn(index_dim, num_channels)) @property def num_dimensions(self) -> int: if isinstance(self._index_dims, int): return 1 return len(self._index_dims) def output_size(self, *args, **kwargs) -> int: return self._num_channels def forward(self, batch_size: int) -> torch.Tensor: position_embeddings = self.position_embeddings if batch_size is not None: position_embeddings = position_embeddings.expand(batch_size, -1, -1) return position_embeddings def _check_or_build_spatial_positions(pos, index_dims, batch_size): """ Checks or builds spatial position features (x, y, ...). Args: pos (`torch.FloatTensor`): None, or an array of position features. If None, position features are built. Otherwise, their size is checked. index_dims (`List[int]`): An iterable giving the spatial/index size of the data to be featurized. batch_size (`int`): The batch size of the data to be featurized. Returns: `torch.FloatTensor` of shape `(batch_size, prod(index_dims))` an array of position features. """ if pos is None: pos = build_linear_positions(index_dims) # equivalent to `torch.broadcast_to(pos[None], (batch_size,) + pos.shape)` # but `torch.broadcast_to` cannot be converted to ONNX pos = pos[None].expand((batch_size,) + pos.shape) pos = torch.reshape(pos, [batch_size, np.prod(index_dims), -1]) else: # Just a warning label: you probably don't want your spatial features to # have a different spatial layout than your pos coordinate system. # But feel free to override if you think it'll work! 
if pos.shape[-1] != len(index_dims): raise ValueError("Spatial features have the wrong number of dimensions.") return pos class PerceiverFourierPositionEncoding(PerceiverAbstractPositionEncoding): """Fourier (Sinusoidal) position encoding.""" def __init__(self, num_bands, max_resolution, concat_pos=True, sine_only=False): super().__init__() self.num_bands = num_bands self.max_resolution = max_resolution self.concat_pos = concat_pos self.sine_only = sine_only @property def num_dimensions(self) -> int: return len(self.max_resolution) def output_size(self): """Returns size of positional encodings last dimension.""" num_dims = len(self.max_resolution) encoding_size = self.num_bands * num_dims if not self.sine_only: encoding_size *= 2 if self.concat_pos: encoding_size += self.num_dimensions return encoding_size def forward( self, index_dims: List[int], batch_size: int, device: torch.device, dtype: torch.dtype, pos: torch.FloatTensor = None, ) -> torch.FloatTensor: pos = _check_or_build_spatial_positions(pos, index_dims, batch_size) fourier_pos_enc = generate_fourier_features( pos, num_bands=self.num_bands, max_resolution=self.max_resolution, concat_pos=self.concat_pos, sine_only=self.sine_only, ).to(device=device, dtype=dtype) return fourier_pos_enc class AbstractPreprocessor(nn.Module): @property def num_channels(self) -> int: """Returns size of preprocessor output.""" raise NotImplementedError() class PerceiverTextPreprocessor(AbstractPreprocessor): """ Text preprocessing for Perceiver Encoder. Can be used to embed `inputs` and add positional encodings. The dimensionality of the embeddings is determined by the `d_model` attribute of the configuration. Args: config ([`PerceiverConfig`]): Model configuration. """ def __init__(self, config: PerceiverConfig) -> None: super().__init__() self.config = config self.embeddings = nn.Embedding(num_embeddings=config.vocab_size, embedding_dim=config.d_model) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model) @property def num_channels(self) -> int: return self.config.d_model def forward(self, inputs: torch.LongTensor, pos: Optional[torch.Tensor] = None, network_input_is_1d: bool = True): embeddings_without_pos = self.embeddings(inputs) seq_length = inputs.shape[1] position_ids = torch.arange(0, seq_length, device=inputs.device) embeddings = embeddings_without_pos + self.position_embeddings(position_ids) return embeddings, None, embeddings_without_pos class PerceiverEmbeddingDecoder(nn.Module): """ Module to decode embeddings (for masked language modeling). Args: config ([`PerceiverConfig`]): Model configuration. """ def __init__(self, config: PerceiverConfig) -> None: super().__init__() self.config = config self.vocab_size = config.vocab_size self.bias = nn.Parameter(torch.zeros(self.vocab_size)) def forward(self, hidden_states: torch.Tensor, embedding_layer: torch.Tensor) -> torch.Tensor: batch_size, seq_len, d_model = hidden_states.shape # Flatten batch dim output = torch.matmul(hidden_states.reshape([-1, d_model]), embedding_layer.weight.transpose(0, 1)) output = output + self.bias return output.reshape([batch_size, seq_len, self.vocab_size]) class PerceiverMultimodalPostprocessor(nn.Module): """ Multimodal postprocessing for Perceiver. Can be used to combine modality-specific postprocessors into a single postprocessor. Args: modalities (`Mapping[str, PostprocessorType]`): Dictionary mapping modality name to postprocessor class for that modality. 
input_is_dict (`bool`, *optional*, defaults to `False`): If True, input is assumed to be dictionary structured, and outputs keep the same dictionary shape. If False, input is a tensor which is sliced up during postprocessing by *modality_sizes*. """ def __init__(self, modalities: Mapping[str, PostprocessorType], input_is_dict: bool = False): super().__init__() self.modalities = nn.ModuleDict(modalities) self.input_is_dict = input_is_dict def forward( self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, modality_sizes=None ) -> Mapping[str, torch.Tensor]: if not self.input_is_dict: # Slice up modalities by their sizes. if modality_sizes is None: raise ValueError("Modality sizes should be specified if input is not a dictionary.") inputs = restructure(modality_sizes=modality_sizes, inputs=inputs) outputs = { modality: postprocessor(inputs[modality], pos=pos, modality_sizes=None) for modality, postprocessor in self.modalities.items() } return outputs class PerceiverClassificationPostprocessor(nn.Module): """ Classification postprocessing for Perceiver. Can be used to convert the decoder output to classification logits. Args: config ([*PerceiverConfig*]): Model configuration. in_channels (`int`): Number of channels in the input. """ def __init__(self, config: PerceiverConfig, in_channels: int) -> None: super().__init__() self.classifier = nn.Linear(in_channels, config.num_labels) def forward(self, inputs, pos: Optional[torch.Tensor] = None, modality_sizes=None) -> torch.Tensor: logits = self.classifier(inputs) return logits[:, 0, :] class PerceiverAudioPostprocessor(nn.Module): """ Audio postprocessing for Perceiver. Can be used to convert the decoder output to audio features. Args: config ([*PerceiverConfig*]): Model configuration. in_channels (`int`): Number of channels in the input. postproc_type (`str`, *optional*, defaults to `"patches"`): Postprocessor type to use. Currently, only "patches" is supported. """ def __init__(self, config: PerceiverConfig, in_channels: int, postproc_type: str = "patches") -> None: super().__init__() if postproc_type not in ("patches",): # to be supported: 'conv', 'patches', 'pixels' raise ValueError("Invalid postproc_type!") # Architecture parameters: self.classifier = nn.Linear(in_channels, config.samples_per_patch) def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, modality_sizes=None) -> torch.Tensor: logits = self.classifier(inputs) return torch.reshape(logits, [inputs.shape[0], -1]) class PerceiverProjectionPostprocessor(nn.Module): """ Projection postprocessing for Perceiver. Can be used to project the channels of the decoder output to a lower dimension. Args: in_channels (`int`): Number of channels in the input. out_channels (`int`): Number of channels in the output. """ def __init__(self, in_channels: int, out_channels: int) -> None: super().__init__() self.classifier = nn.Linear(in_channels, out_channels) def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, modality_sizes=None) -> torch.Tensor: logits = self.classifier(inputs) return logits class PerceiverImagePreprocessor(AbstractPreprocessor): """ Image preprocessing for Perceiver Encoder. Note: the *out_channels* argument refers to the output channels of a convolutional layer, if *prep_type* is set to "conv1x1" or "conv". If one adds absolute position embeddings, one must make sure the *num_channels* of the position encoding kwargs are set equal to the *out_channels*. Args: config ([*PerceiverConfig*]): Model configuration. 
prep_type (`str`, *optional*, defaults to `"conv"`): Preprocessing type. Can be "conv1x1", "conv", "patches", "pixels". spatial_downsample (`int`, *optional*, defaults to 4): Spatial downsampling factor. temporal_downsample (`int`, *optional*, defaults to 1): Temporal downsampling factor (only relevant in case a time dimension is present). position_encoding_type (`str`, *optional*, defaults to `"fourier"`): Position encoding type. Can be "fourier" or "trainable". in_channels (`int`, *optional*, defaults to 3): Number of channels in the input. out_channels (`int`, *optional*, defaults to 64): Number of channels in the output. conv_after_patching (`bool`, *optional*, defaults to `False`): Whether to apply a convolutional layer after patching. conv_after_patching_in_channels (`int`, *optional*, defaults to 54): Number of channels in the input of the convolutional layer after patching. conv2d_use_batchnorm (`bool`, *optional*, defaults to `True`): Whether to use batch normalization in the convolutional layer. concat_or_add_pos (`str`, *optional*, defaults to `"concat"`): How to concatenate the position encoding to the input. Can be "concat" or "add". project_pos_dim (`int`, *optional*, defaults to -1): Dimension of the position encoding to project to. If -1, no projection is applied. **position_encoding_kwargs (`Dict`, *optional*): Keyword arguments for the position encoding. """ def __init__( self, config, prep_type="conv", spatial_downsample: int = 4, temporal_downsample: int = 1, position_encoding_type: str = "fourier", in_channels: int = 3, out_channels: int = 64, conv_after_patching: bool = False, conv_after_patching_in_channels: int = 54, # only relevant when conv_after_patching = True conv2d_use_batchnorm: bool = True, concat_or_add_pos: str = "concat", project_pos_dim: int = -1, **position_encoding_kwargs, ): super().__init__() self.config = config if prep_type not in ("conv", "patches", "pixels", "conv1x1"): raise ValueError(f"Prep_type {prep_type} is invalid") if concat_or_add_pos not in ["concat", "add"]: raise ValueError(f"Invalid value {concat_or_add_pos} for concat_or_add_pos.") self.in_channels = in_channels self.prep_type = prep_type self.spatial_downsample = spatial_downsample self.temporal_downsample = temporal_downsample self.position_encoding_type = position_encoding_type self.concat_or_add_pos = concat_or_add_pos self.conv_after_patching = conv_after_patching self.out_channels = out_channels if self.prep_type == "conv": # Downsampling with conv is currently restricted convnet_num_layers = math.log(spatial_downsample, 4) convnet_num_layers_is_int = convnet_num_layers == np.round(convnet_num_layers) if not convnet_num_layers_is_int or temporal_downsample != 1: raise ValueError( "Only powers of 4 expected for spatial and 1 expected for temporal downsampling with conv." ) self.convnet = Conv2DDownsample( in_channels=in_channels, num_layers=int(convnet_num_layers), out_channels=out_channels, use_batchnorm=conv2d_use_batchnorm, ) elif self.prep_type == "conv1x1": if temporal_downsample != 1: raise ValueError("Conv1x1 does not downsample in time.") self.convnet_1x1 = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=(1, 1), # spatial_downsample is unconstrained for 1x1 convolutions. 
stride=(spatial_downsample, spatial_downsample), ) # Position embeddings self.project_pos_dim = project_pos_dim self.position_embeddings, self.positions_projection = build_position_encoding( position_encoding_type=position_encoding_type, out_channels=out_channels, project_pos_dim=project_pos_dim, **position_encoding_kwargs, ) # Optional convolutional layer after patches. self.conv_after_patches = ( nn.Linear(conv_after_patching_in_channels, self.out_channels) if conv_after_patching else nn.Identity() ) @property def num_channels(self) -> int: # Let's assume that the number of resolutions (in the context of image preprocessing) # of the input data is 2 or 3 depending on whether we are processing image or video respectively. # In this case, for convenience, we will declare is_temporal variable, # which will show whether the data has a temporal dimension or not. is_temporal = self.position_embeddings.num_dimensions > 2 # position embedding if self.project_pos_dim > 0: pos_dim = self.project_pos_dim else: pos_dim = self.position_embeddings.output_size() if self.concat_or_add_pos == "add": return pos_dim # inputs if self.conv_after_patching or self.prep_type in ("conv1x1", "conv"): inp_dim = self.out_channels elif self.prep_type == "pixels": inp_dim = self.in_channels if not is_temporal: inp_dim = math.ceil(inp_dim / self.spatial_downsample) elif self.prep_type == "patches": if self.conv_after_patching: inp_dim = self.out_channels else: inp_dim = self.in_channels * self.spatial_downsample**2 if is_temporal: inp_dim *= self.temporal_downsample return inp_dim + pos_dim def _build_network_inputs(self, inputs: torch.Tensor, network_input_is_1d: bool = True): """ Construct the final input, including position encoding. This method expects the inputs to always have channels as last dimension. """ batch_size = inputs.shape[0] index_dims = inputs.shape[1:-1] indices = np.prod(index_dims) # Flatten input features to a 1D index dimension if necessary. if len(inputs.shape) > 3 and network_input_is_1d: inputs = torch.reshape(inputs, [batch_size, indices, -1]) # Construct the position encoding. if self.position_encoding_type == "trainable": pos_enc = self.position_embeddings(batch_size) elif self.position_encoding_type == "fourier": pos_enc = self.position_embeddings(index_dims, batch_size, device=inputs.device, dtype=inputs.dtype) # Optionally project them to a target dimension. pos_enc = self.positions_projection(pos_enc) if not network_input_is_1d: # Reshape pos to match the input feature shape # if the network takes non-1D inputs sh = inputs.shape pos_enc = torch.reshape(pos_enc, list(sh)[:-1] + [-1]) if self.concat_or_add_pos == "concat": inputs_with_pos = torch.cat([inputs, pos_enc], dim=-1) elif self.concat_or_add_pos == "add": inputs_with_pos = inputs + pos_enc return inputs_with_pos, inputs def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, network_input_is_1d: bool = True): if self.prep_type == "conv": # Convnet image featurization. 
            # Downsamples spatially by a factor of 4
            inputs = self.convnet(inputs)

        elif self.prep_type == "conv1x1":
            # map inputs to self.out_channels
            inputs = self.convnet_1x1(inputs)

        elif self.prep_type == "pixels":
            # if requested, downsamples in the crudest way
            if inputs.ndim == 4:
                # (batch_size, num_channels, height, width): stride only over the spatial dimensions
                inputs = inputs[:, :, :: self.spatial_downsample, :: self.spatial_downsample]
            elif inputs.ndim == 5:
                inputs = inputs[
                    :, :: self.temporal_downsample, :, :: self.spatial_downsample, :: self.spatial_downsample
                ]
            else:
                raise ValueError("Unsupported data format for pixels.")

        elif self.prep_type == "patches":
            # Space2depth featurization.
            # Video: B x T x C x H x W
            inputs = space_to_depth(
                inputs, temporal_block_size=self.temporal_downsample, spatial_block_size=self.spatial_downsample
            )

            if inputs.ndim == 5 and inputs.shape[1] == 1:
                # for flow
                inputs = inputs.squeeze(dim=1)

            # Optionally apply conv layer.
            inputs = self.conv_after_patches(inputs)

        if self.prep_type != "patches":
            # move channels to last dimension, as the _build_network_inputs method below expects this
            if inputs.ndim == 4:
                inputs = inputs.permute(0, 2, 3, 1)
            elif inputs.ndim == 5:
                inputs = inputs.permute(0, 1, 3, 4, 2)
            else:
                raise ValueError("Unsupported data format for conv1x1.")

        inputs, inputs_without_pos = self._build_network_inputs(inputs, network_input_is_1d)
        modality_sizes = None  # Size for each modality, only needed for multimodal

        return inputs, modality_sizes, inputs_without_pos


class PerceiverOneHotPreprocessor(AbstractPreprocessor):
    """
    One-hot preprocessor for Perceiver Encoder. Can be used to add a dummy index dimension to the input.

    Args:
        config ([`PerceiverConfig`]):
            Model configuration.
    """

    def __init__(self, config: PerceiverConfig) -> None:
        super().__init__()
        self.config: PerceiverConfig = config

    @property
    def num_channels(self) -> int:
        return self.config.num_labels

    def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, network_input_is_1d: bool = True):
        # Add a dummy index dimension.
        inputs = inputs[:, None, :]

        # No position encodings, so the 1st (input) and 3rd (inputs_without_pos)
        # outputs are identical.
        return inputs, None, inputs


class PerceiverAudioPreprocessor(AbstractPreprocessor):
    """
    Audio preprocessing for Perceiver Encoder.

    Args:
        config ([*PerceiverConfig*]):
            Model configuration.
        prep_type (`str`, *optional*, defaults to `"patches"`):
            Preprocessor type to use. Only "patches" is supported.
        samples_per_patch (`int`, *optional*, defaults to 96):
            Number of samples per patch.
        position_encoding_type (`str`, *optional*, defaults to `"fourier"`):
            Type of position encoding to use. Can be "trainable" or "fourier".
        concat_or_add_pos (`str`, *optional*, defaults to `"concat"`):
            How to concatenate the position encoding to the input. Can be "concat" or "add".
        out_channels (`int`, *optional*, defaults to 64):
            Number of channels in the output.
        project_pos_dim (`int`, *optional*, defaults to -1):
            Dimension of the position encoding to project to. If -1, no projection is applied.
        **position_encoding_kwargs (`Dict`, *optional*):
            Keyword arguments for the position encoding.
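
    A minimal sketch of the patching behavior (illustrative; the channel count depends on the Fourier settings):

    ```python
    >>> import torch

    >>> config = PerceiverConfig()
    >>> preprocessor = PerceiverAudioPreprocessor(
    ...     config,
    ...     samples_per_patch=96,
    ...     fourier_position_encoding_kwargs={
    ...         "num_bands": 192,
    ...         "max_resolution": (1920,),
    ...         "sine_only": False,
    ...         "concat_pos": True,
    ...     },
    ... )
    >>> audio = torch.randn(1, 1920, 1)
    >>> inputs, _, _ = preprocessor(audio)
    >>> inputs.shape  # 1920 samples -> 20 patches of 96 samples, plus 385 position channels
    torch.Size([1, 20, 481])
    ```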
""" def __init__( self, config, prep_type: str = "patches", samples_per_patch: int = 96, position_encoding_type: str = "fourier", concat_or_add_pos: str = "concat", out_channels=64, project_pos_dim=-1, **position_encoding_kwargs, ): super().__init__() self.config = config if prep_type not in ("patches",): raise ValueError(f"Prep_type {prep_type} is invalid, can only be 'patches'.") if concat_or_add_pos not in ["concat", "add"]: raise ValueError(f"Concat_or_pos {concat_or_add_pos} is invalid, can only be 'concat' or 'add'.") self.samples_per_patch = samples_per_patch self.position_encoding_type = position_encoding_type self.concat_or_add_pos = concat_or_add_pos self.project_pos_dim = project_pos_dim # Position embeddings self.position_embeddings, self.positions_projection = build_position_encoding( position_encoding_type=position_encoding_type, out_channels=out_channels, project_pos_dim=project_pos_dim, **position_encoding_kwargs, ) @property def num_channels(self) -> int: # position embedding if self.project_pos_dim > 0: pos_dim = self.project_pos_dim else: pos_dim = self.position_embeddings.output_size() if self.concat_or_add_pos == "add": return pos_dim return self.samples_per_patch + pos_dim def _build_network_inputs(self, inputs): """Construct the final input, including position encoding.""" batch_size = inputs.shape[0] index_dims = inputs.shape[1:-1] # Construct the position encoding. if self.position_encoding_type == "trainable": pos_enc = self.position_embeddings(batch_size) elif self.position_encoding_type == "fourier": pos_enc = self.position_embeddings(index_dims, batch_size, device=inputs.device, dtype=inputs.dtype) # Optionally project them to a target dimension. pos_enc = self.positions_projection(pos_enc) if self.concat_or_add_pos == "concat": inputs_with_pos = torch.cat([inputs, pos_enc], dim=-1) elif self.concat_or_add_pos == "add": inputs_with_pos = inputs + pos_enc return inputs_with_pos, inputs def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, network_input_is_1d: bool = True): inputs = torch.reshape(inputs, [inputs.shape[0], -1, self.samples_per_patch]) inputs, inputs_without_pos = self._build_network_inputs(inputs) modality_sizes = None # Size for each modality, only needed for multimodal return inputs, modality_sizes, inputs_without_pos class PerceiverMultimodalPreprocessor(AbstractPreprocessor): """ Multimodal preprocessing for Perceiver Encoder. Inputs for each modality are preprocessed, then padded with trainable position embeddings to have the same number of channels. Args: modalities (`Mapping[str, PreprocessorType]`): Dict mapping modality name to preprocessor. mask_probs (`Dict[str, float]`): Dict mapping modality name to masking probability of that modality. min_padding_size (`int`, *optional*, defaults to 2): The minimum padding size for all modalities. The final output will have num_channels equal to the maximum channels across all modalities plus min_padding_size. 
""" def __init__( self, modalities: Mapping[str, PreprocessorType], mask_probs: Optional[Mapping[str, float]] = None, min_padding_size: int = 2, ): super().__init__() self.modalities = nn.ModuleDict(modalities) self.min_padding_size = min_padding_size self.mask_probs = mask_probs if mask_probs is not None else {} self.padding = nn.ParameterDict( { modality: nn.Parameter(torch.randn(1, self.num_channels - preprocessor.num_channels)) for modality, preprocessor in modalities.items() } ) self.mask = nn.ParameterDict( {modality: nn.Parameter(torch.randn(1, self.num_channels)) for modality, _ in self.mask_probs.items()} ) @property def num_channels(self) -> int: max_channel_size = max(processor.num_channels for _, processor in self.modalities.items()) common_channel_size = max_channel_size + self.min_padding_size return common_channel_size def forward( self, inputs: Mapping[str, torch.Tensor], pos: Optional[torch.Tensor] = None, network_input_is_1d: bool = True ) -> PreprocessorOutputType: padded = {} modality_sizes = {} inputs_without_pos = {} for modality, preprocessor in self.modalities.items(): # preprocess each modality using the respective preprocessor. output, _, inputs_without_pos[modality] = preprocessor( inputs[modality], pos=pos, network_input_is_1d=network_input_is_1d ) # pad to the same common_channel_size. batch_size, num_samples, num_channels = output.shape pos_enc = self.padding[modality].expand(batch_size, -1, -1) padding = torch.broadcast_to( pos_enc, [batch_size, num_samples, self.num_channels - num_channels], ) output_padded = torch.cat([output, padding], dim=2) # mask if required if modality in self.mask_probs: mask_token = self.mask[modality].expand(batch_size, -1, -1) mask_prob = self.mask_probs[modality] mask = torch.bernoulli(torch.full([batch_size, num_samples], mask_prob)) mask = torch.unsqueeze(mask, dim=2).to(mask_token.device) output_padded = (1 - mask) * output_padded + mask * mask_token padded[modality] = output_padded modality_sizes[modality] = output_padded.shape[1] # Apply a predictable ordering to the modalities padded_ls = [padded[k] for k in sorted(padded.keys())] # Finally, concatenate along the time dimension final_inputs = torch.cat(padded_ls, dim=1) return final_inputs, modality_sizes, inputs_without_pos
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/perceiver/__init__.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) _import_structure = { "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"], "tokenization_perceiver": ["PerceiverTokenizer"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"] _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_perceiver"] = [ "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST", "PerceiverForImageClassificationConvProcessing", "PerceiverForImageClassificationFourier", "PerceiverForImageClassificationLearned", "PerceiverForMaskedLM", "PerceiverForMultimodalAutoencoding", "PerceiverForOpticalFlow", "PerceiverForSequenceClassification", "PerceiverLayer", "PerceiverModel", "PerceiverPreTrainedModel", ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
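
# Illustrative note: because of the `_LazyModule` assignment above, a statement such as
# `from transformers.models.perceiver import PerceiverModel` only imports the torch-backed
# `modeling_perceiver` submodule at the moment the attribute is first resolved.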
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/perceiver/image_processing_perceiver.py
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for Perceiver."""

from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    infer_channel_dimension_format,
    is_scaled_image,
    make_list_of_images,
    to_numpy_array,
    valid_images,
    validate_kwargs,
    validate_preprocess_arguments,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


class PerceiverImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Perceiver image processor.

    Args:
        do_center_crop (`bool`, *optional*, defaults to `True`):
            Whether or not to center crop the image. If the input size is smaller than `crop_size` along any edge,
            the image will be padded with zeros and then center cropped. Can be overridden by the `do_center_crop`
            parameter in the `preprocess` method.
        crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
            Desired output size when applying center-cropping. Can be overridden by the `crop_size` parameter in the
            `preprocess` method.
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image to `(size["height"], size["width"])`. Can be overridden by the `do_resize`
            parameter in the `preprocess` method.
        size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
            Size of the image after resizing. Can be overridden by the `size` parameter in the `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
            Defines the resampling filter to use if resizing the image. Can be overridden by the `resample` parameter
            in the `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
            `do_rescale` parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor`
            parameter in the `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
            Standard deviation to use if normalizing the image.
            This is a float or list of floats the length of the number of channels in the image. Can be overridden
            by the `image_std` parameter in the `preprocess` method.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
        self._valid_processor_keys = [
            "images",
            "do_center_crop",
            "crop_size",
            "do_resize",
            "size",
            "resample",
            "do_rescale",
            "rescale_factor",
            "do_normalize",
            "image_mean",
            "image_std",
            "return_tensors",
            "data_format",
            "input_data_format",
        ]

    def center_crop(
        self,
        image: np.ndarray,
        crop_size: Dict[str, int],
        size: Optional[Dict[str, int]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Center crop an image to `(size["height"] / crop_size["height"] * min_dim, size["width"] / crop_size["width"]
        * min_dim)`, where `min_dim = min(height, width)` is the smaller dimension of the input image. If the input
        size is smaller than `crop_size` along any edge, the image will be padded with zeros and then center cropped.

        Args:
            image (`np.ndarray`):
                Image to center crop.
            crop_size (`Dict[str, int]`):
                Desired output size after applying the center crop.
            size (`Dict[str, int]`, *optional*):
                Size of the image after resizing. If not provided, the self.size attribute will be used.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
""" size = self.size if size is None else size size = get_size_dict(size) crop_size = get_size_dict(crop_size, param_name="crop_size") height, width = get_image_size(image, channel_dim=input_data_format) min_dim = min(height, width) cropped_height = (size["height"] / crop_size["height"]) * min_dim cropped_width = (size["width"] / crop_size["width"]) * min_dim return center_crop( image, size=(cropped_height, cropped_width), data_format=data_format, input_data_format=input_data_format, **kwargs, ) # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized image. """ size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}") output_size = (size["height"], size["width"]) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def preprocess( self, images: ImageInput, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. 
                If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
                Whether to center crop the image to `crop_size`.
            crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
                Desired output size after applying the center crop.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing.
            resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`.
                Only has an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use if `do_normalize` is set to `True`.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use if `do_normalize` is set to `True`.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if do_center_crop: images = [ self.center_crop(image, crop_size, size=size, input_data_format=input_data_format) for image in images ] if do_resize: images = [ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
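

# A minimal usage sketch of the `preprocess` pipeline above, assuming only the class defaults
# (dynamic center crop to 256x256 proportions, resize to 224x224, rescale, ImageNet-default
# normalization). The random image and the printed shape are illustrative, not from the original file.
if __name__ == "__main__":
    processor = PerceiverImageProcessor()  # all defaults, as documented in the class docstring
    image = np.random.randint(0, 256, size=(300, 400, 3), dtype=np.uint8)  # HWC uint8 input
    outputs = processor.preprocess(image, return_tensors="np")
    # One batch entry, channels-first, resized to size["height"] x size["width"]
    print(outputs["pixel_values"].shape)  # expected: (1, 3, 224, 224)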
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/bertweet/tokenization_bertweet.py
# coding=utf-8
# Copyright (c) 2020, VinAI Research and the HuggingFace Inc. team.
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for BERTweet."""

import html
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple

import regex

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}


def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BertweetTokenizer(PreTrainedTokenizer):
    """
    Constructs a BERTweet tokenizer, using Byte-Pair-Encoding.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        normalization (`bool`, *optional*, defaults to `False`):
            Whether or not to apply a tweet normalization preprocessing step before tokenization.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of
            sequence. The token used is the `sep_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
""" vocab_files_names = VOCAB_FILES_NAMES def __init__( self, vocab_file, merges_file, normalization=False, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs, ): try: from emoji import demojize self.demojizer = demojize except ImportError: logger.warning( "emoji is not installed, thus not converting emoticons or emojis into text. Install emoji: pip3" " install emoji==0.6.0" ) self.demojizer = None self.vocab_file = vocab_file self.merges_file = merges_file self.encoder = {} self.encoder[str(bos_token)] = 0 self.encoder[str(pad_token)] = 1 self.encoder[str(eos_token)] = 2 self.encoder[str(unk_token)] = 3 self.add_from_file(vocab_file) self.decoder = {v: k for k, v in self.encoder.items()} with open(merges_file, encoding="utf-8") as merges_handle: merges = merges_handle.read().split("\n")[:-1] merges = [tuple(merge.split()[:-1]) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} self.normalization = normalization self.tweetPreprocessor = TweetTokenizer() self.special_puncts = {"’": "'", "…": "..."} super().__init__( normalization=normalization, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, **kwargs, ) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERTweet sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
BERTweet does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] @property def vocab_size(self): return len(self.encoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) word = tuple(list(word[:-1]) + [word[-1] + "</w>"]) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = "@@ ".join(word) word = word[:-4] self.cache[token] = word return word def _tokenize(self, text): """Tokenize a string.""" if self.normalization: # Perform Tweet normalization before performing BPE text = self.normalizeTweet(text) split_tokens = [] words = re.findall(r"\S+\n?", text) for token in words: split_tokens.extend(list(self.bpe(token).split(" "))) return split_tokens def normalizeTweet(self, tweet): """ Normalize a raw Tweet """ for punct in self.special_puncts: tweet = tweet.replace(punct, self.special_puncts[punct]) tokens = self.tweetPreprocessor.tokenize(tweet) normTweet = " ".join([self.normalizeToken(token) for token in tokens]) normTweet = ( normTweet.replace("cannot ", "can not ") .replace("n't ", " n't ") .replace("n 't ", " n't ") .replace("ca n't", "can't") .replace("ai n't", "ain't") ) normTweet = ( normTweet.replace("'m ", " 'm ") .replace("'re ", " 're ") .replace("'s ", " 's ") .replace("'ll ", " 'll ") .replace("'d ", " 'd ") .replace("'ve ", " 've ") ) normTweet = ( normTweet.replace(" p . m .", " p.m.") .replace(" p . m ", " p.m ") .replace(" a . m .", " a.m.") .replace(" a . 
m ", " a.m ") ) return " ".join(normTweet.split()) def normalizeToken(self, token): """ Normalize tokens in a Tweet """ lowercased_token = token.lower() if token.startswith("@"): return "@USER" elif lowercased_token.startswith("http") or lowercased_token.startswith("www"): return "HTTPURL" elif len(token) == 1: if token in self.special_puncts: return self.special_puncts[token] if self.demojizer is not None: return self.demojizer(token) else: return token else: return token def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = " ".join(tokens).replace("@@ ", "").strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) out_merge_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file): copyfile(self.merges_file, out_merge_file) return out_vocab_file, out_merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far) def add_from_file(self, f): """ Loads a pre-existing dictionary from a text file and adds its symbols to this instance. """ if isinstance(f, str): try: with open(f, "r", encoding="utf-8") as fd: self.add_from_file(fd) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset") return lines = f.readlines() for lineTmp in lines: line = lineTmp.strip() idx = line.rfind(" ") if idx == -1: raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'") word = line[:idx] self.encoder[word] = len(self.encoder) # Natural Language Toolkit: Twitter Tokenizer # # Copyright (C) 2001-2020 NLTK Project # Author: Christopher Potts <cgpotts@stanford.edu> # Ewan Klein <ewan@inf.ed.ac.uk> (modifications) # Pierpaolo Pantone <> (modifications) # URL: http://nltk.org/ # For license information, see LICENSE.TXT # """ Twitter-aware tokenizer, designed to be flexible and easy to adapt to new domains and tasks. The basic logic is this: 1. The tuple regex_strings defines a list of regular expression strings. 2. The regex_strings strings are put, in order, into a compiled regular expression object called word_re. 3. 
The tokenization is done by word_re.findall(s), where s is the user-supplied string, inside the tokenize() method of the class Tokenizer. 4. When instantiating Tokenizer objects, there is a single option: preserve_case. By default, it is set to True. If it is set to False, then the tokenizer will lowercase everything except for emoticons. """ ###################################################################### # # import regex # https://github.com/nltk/nltk/issues/2409 # import html # ###################################################################### # The following strings are components in the regular expression # that is used for tokenizing. It's important that phone_number # appears first in the final regex (since it can contain whitespace). # It also could matter that tags comes after emoticons, due to the # possibility of having text like # # <:| and some text >:) # # Most importantly, the final element should always be last, since it # does a last ditch whitespace-based tokenization of whatever is left. # ToDo: Update with http://en.wikipedia.org/wiki/List_of_emoticons ? # This particular element is used in a couple ways, so we define it # with a name: # docstyle-ignore EMOTICONS = r""" (?: [<>]? [:;=8] # eyes [\-o\*\']? # optional nose [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth | [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth [\-o\*\']? # optional nose [:;=8] # eyes [<>]? | <3 # heart )""" # URL pattern due to John Gruber, modified by Tom Winzig. See # https://gist.github.com/winzig/8894715 # docstyle-ignore URLS = r""" # Capture 1: entire matched URL (?: https?: # URL protocol and colon (?: /{1,3} # 1-3 slashes | # or [a-z0-9%] # Single letter or digit or '%' # (Trying not to match e.g. "URI::Escape") ) | # or # looks like domain name followed by a slash: [a-z0-9.\-]+[.] (?:[a-z]{2,13}) / ) (?: # One or more: [^\s()<>{}\[\]]+ # Run of non-space, non-()<>{}[] | # or \([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...) | \([^\s]+?\) # balanced parens, non-recursive: (...) )+ (?: # End with: \([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...) | \([^\s]+?\) # balanced parens, non-recursive: (...) | # or [^\s`!()\[\]{};:'".,<>?«»“”‘’] # not a space or one of these punct chars ) | # OR, the following to match naked domains: (?: (?<!@) # not preceded by a @, avoid matching foo@_gmail.com_ [a-z0-9]+ (?:[.\-][a-z0-9]+)* [.] (?:[a-z]{2,13}) \b /? (?!@) # not succeeded by a @, # avoid matching "foo.na" in "foo.na@example.com" ) """ # docstyle-ignore # The components of the tokenizer: REGEXPS = ( URLS, # Phone numbers: r""" (?: (?: # (international) \+?[01] [ *\-.\)]* )? (?: # (area code) [\(]? \d{3} [ *\-.\)]* )? \d{3} # exchange [ *\-.\)]* \d{4} # base )""", # ASCII Emoticons EMOTICONS, # HTML tags: r"""<[^>\s]+>""", # ASCII Arrows r"""[\-]+>|<[\-]+""", # Twitter username: r"""(?:@[\w_]+)""", # Twitter hashtags: r"""(?:\#+[\w_]+[\w\'_\-]*[\w_]+)""", # email addresses r"""[\w.+-]+@[\w-]+\.(?:[\w-]\.?)+[\w-]""", # docstyle-ignore # Remaining word types: r""" (?:[^\W\d_](?:[^\W\d_]|['\-_])+[^\W\d_]) # Words with apostrophes or dashes. | (?:[+\-]?\d+[,/.:-]\d+[+\-]?) # Numbers, including fractions, decimals. | (?:[\w_]+) # Words without apostrophes or dashes. | (?:\.(?:\s*\.){1,}) # Ellipsis dots. | (?:\S) # Everything else that isn't whitespace. 
""", ) ###################################################################### # This is the core tokenizing regex: WORD_RE = regex.compile(r"""(%s)""" % "|".join(REGEXPS), regex.VERBOSE | regex.I | regex.UNICODE) # WORD_RE performs poorly on these patterns: HANG_RE = regex.compile(r"([^a-zA-Z0-9])\1{3,}") # The emoticon string gets its own regex so that we can preserve case for # them as needed: EMOTICON_RE = regex.compile(EMOTICONS, regex.VERBOSE | regex.I | regex.UNICODE) # These are for regularizing HTML entities to Unicode: ENT_RE = regex.compile(r"&(#?(x?))([^&;\s]+);") ###################################################################### # Functions for converting html entities ###################################################################### def _str_to_unicode(text, encoding=None, errors="strict"): if encoding is None: encoding = "utf-8" if isinstance(text, bytes): return text.decode(encoding, errors) return text def _replace_html_entities(text, keep=(), remove_illegal=True, encoding="utf-8"): """ Remove entities from text by converting them to their corresponding unicode character. Args: text: A unicode string or a byte string encoded in the given *encoding* (which defaults to 'utf-8'). keep (list): List of entity names which should not be replaced. This supports both numeric entities (`&#nnnn;` and `&#hhhh;`) and named entities (such as `&nbsp;` or `&gt;`). remove_illegal (bool): If `True`, entities that can't be converted are removed. Otherwise, entities that can't be converted are kept "as is". Returns: A unicode string with the entities removed. See https://github.com/scrapy/w3lib/blob/master/w3lib/html.py Examples: ```python >>> from nltk.tokenize.casual import _replace_html_entities >>> _replace_html_entities(b"Price: &pound;100") 'Price: \\xa3100' >>> print(_replace_html_entities(b"Price: &pound;100")) Price: £100 ```""" def _convert_entity(match): entity_body = match.group(3) if match.group(1): try: if match.group(2): number = int(entity_body, 16) else: number = int(entity_body, 10) # Numeric character references in the 80-9F range are typically # interpreted by browsers as representing the characters mapped # to bytes 80-9F in the Windows-1252 encoding. For more info # see: https://en.wikipedia.org/wiki/ISO/IEC_8859-1#Similar_character_sets if 0x80 <= number <= 0x9F: return bytes((number,)).decode("cp1252") except ValueError: number = None else: if entity_body in keep: return match.group(0) else: number = html.entities.name2codepoint.get(entity_body) if number is not None: try: return chr(number) except (ValueError, OverflowError): pass return "" if remove_illegal else match.group(0) return ENT_RE.sub(_convert_entity, _str_to_unicode(text, encoding)) ###################################################################### class TweetTokenizer: r""" Examples: ```python >>> # Tokenizer for tweets. >>> from nltk.tokenize import TweetTokenizer >>> tknzr = TweetTokenizer() >>> s0 = "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--" >>> tknzr.tokenize(s0) ['This', 'is', 'a', 'cooool', '#dummysmiley', ':', ':-)', ':-P', '<3', 'and', 'some', 'arrows', '<', '>', '->', '<--'] >>> # Examples using *strip_handles* and *reduce_len parameters*: >>> tknzr = TweetTokenizer(strip_handles=True, reduce_len=True) >>> s1 = "@remy: This is waaaaayyyy too much for you!!!!!!" 
>>> tknzr.tokenize(s1) [':', 'This', 'is', 'waaayyy', 'too', 'much', 'for', 'you', '!', '!', '!'] ```""" def __init__(self, preserve_case=True, reduce_len=False, strip_handles=False): self.preserve_case = preserve_case self.reduce_len = reduce_len self.strip_handles = strip_handles def tokenize(self, text): """ Args: text: str Returns: list(str) A tokenized list of strings; concatenating this list returns the original string if `preserve_case=False` """ # Fix HTML character entities: text = _replace_html_entities(text) # Remove username handles if self.strip_handles: text = remove_handles(text) # Normalize word lengthening if self.reduce_len: text = reduce_lengthening(text) # Shorten problematic sequences of characters safe_text = HANG_RE.sub(r"\1\1\1", text) # Tokenize: words = WORD_RE.findall(safe_text) # Possibly alter the case, but avoid changing emoticons like :D into :d: if not self.preserve_case: words = [x if EMOTICON_RE.search(x) else x.lower() for x in words] return words ###################################################################### # Normalization Functions ###################################################################### def reduce_lengthening(text): """ Replace repeated character sequences of length 3 or greater with sequences of length 3. """ pattern = regex.compile(r"(.)\1{2,}") return pattern.sub(r"\1\1\1", text) def remove_handles(text): """ Remove Twitter username handles from text. """ pattern = regex.compile( r"(?<![A-Za-z0-9_!@#\$%&*])@(([A-Za-z0-9_]){20}(?!@))|(?<![A-Za-z0-9_!@#\$%&*])@(([A-Za-z0-9_]){1,19})(?![A-Za-z0-9_]*@)" ) # Substitute handles with ' ' to ensure that text on either side of removed handles are tokenized correctly return pattern.sub(" ", text) ###################################################################### # Tokenization Function ###################################################################### def casual_tokenize(text, preserve_case=True, reduce_len=False, strip_handles=False): """ Convenience function for wrapping the tokenizer. """ return TweetTokenizer(preserve_case=preserve_case, reduce_len=reduce_len, strip_handles=strip_handles).tokenize( text ) ###############################################################################
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/bertweet/__init__.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule _import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]} if TYPE_CHECKING: from .tokenization_bertweet import BertweetTokenizer else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
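

# A small sketch of what the `_LazyModule` registration above provides (illustrative; assumes an installed
# `transformers` package): the tokenizer file is only imported when the attribute is first accessed.
if __name__ == "__main__":
    from transformers.models.bertweet import BertweetTokenizer  # the actual import happens lazily, here

    print(BertweetTokenizer.vocab_files_names)  # {'vocab_file': 'vocab.txt', 'merges_file': 'bpe.codes'}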
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/sew/modeling_sew.py
# coding=utf-8 # Copyright 2021 ASAPP Inc. and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch SEW model.""" import math import warnings from typing import Optional, Tuple, Union import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...integrations.deepspeed import is_deepspeed_zero3_enabled from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging, ) from .configuration_sew import SEWConfig if is_flash_attn_2_available(): from flash_attn import flash_attn_func, flash_attn_varlen_func from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa logger = logging.get_logger(__name__) _HIDDEN_STATES_START_POSITION = 1 # General docstring _CONFIG_FOR_DOC = "SEWConfig" # Base docstring _CHECKPOINT_FOR_DOC = "asapp/sew-tiny-100k-ft-ls100h" _EXPECTED_OUTPUT_SHAPE = [1, 292, 512] # CTC docstring _CTC_EXPECTED_OUTPUT = ( "'MISTER QUILTER IS THE APPOSTILE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPOLLE'" ) _CTC_EXPECTED_LOSS = 0.42 # Audio class docstring _SEQ_CLASS_CHECKPOINT = "anton-l/sew-mid-100k-ft-keyword-spotting" _SEQ_CLASS_EXPECTED_OUTPUT = "'_unknown_'" _SEQ_CLASS_EXPECTED_LOSS = 9.52 from ..deprecated._archive_maps import SEW_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() max_seqlen_in_batch = seqlens_in_batch.max().item() cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) return ( indices, cu_seqlens, max_seqlen_in_batch, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices def _compute_mask_indices( shape: Tuple[int, int], mask_prob: float, mask_length: int, attention_mask: Optional[torch.LongTensor] = None, min_masks: int = 0, ) -> np.ndarray: """ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on CPU as part of the preprocessing during training. Args: shape: The shape for which to compute masks. This should be of a tuple of size 2 where the first element is the batch size and the second element is the length of the axis to span. mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. 
The number of independently generated mask spans of length `mask_length` is computed by `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the actual percentage will be smaller. mask_length: size of the mask min_masks: minimum number of masked spans attention_mask: A (right-padded) attention mask which independently shortens the feature axis of each batch dimension. """ batch_size, sequence_length = shape if mask_length < 1: raise ValueError("`mask_length` has to be bigger than 0.") if mask_length > sequence_length: raise ValueError( f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}" f" and `sequence_length`: {sequence_length}`" ) # epsilon is used for probabilistic rounding epsilon = np.random.rand(1).item() def compute_num_masked_span(input_length): """Given input length, compute how many spans should be masked""" num_masked_span = int(mask_prob * input_length / mask_length + epsilon) num_masked_span = max(num_masked_span, min_masks) # make sure num masked span <= sequence_length if num_masked_span * mask_length > sequence_length: num_masked_span = sequence_length // mask_length # make sure num_masked span is also <= input_length - (mask_length - 1) if input_length - (mask_length - 1) < num_masked_span: num_masked_span = max(input_length - (mask_length - 1), 0) return num_masked_span # compute number of masked spans in batch input_lengths = ( attention_mask.sum(-1).detach().tolist() if attention_mask is not None else [sequence_length for _ in range(batch_size)] ) # SpecAugment mask to fill spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool) spec_aug_mask_idxs = [] max_num_masked_span = compute_num_masked_span(sequence_length) if max_num_masked_span == 0: return spec_aug_mask for input_length in input_lengths: # compute num of masked spans for this input num_masked_span = compute_num_masked_span(input_length) # get random indices to mask spec_aug_mask_idx = np.random.choice( np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False ) # pick first sampled index that will serve as a dummy index to pad vector # to ensure same dimension for all batches due to probabilistic rounding # Picking first sample just pads those vectors twice. 
if len(spec_aug_mask_idx) == 0: # this case can only happen if `input_length` is strictly smaller then # `sequence_length` in which case the last token has to be a padding # token which we can use as a dummy mask id dummy_mask_idx = sequence_length - 1 else: dummy_mask_idx = spec_aug_mask_idx[0] spec_aug_mask_idx = np.concatenate( [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx] ) spec_aug_mask_idxs.append(spec_aug_mask_idx) spec_aug_mask_idxs = np.array(spec_aug_mask_idxs) # expand masked indices to masked spans spec_aug_mask_idxs = np.broadcast_to( spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length) ) spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length) # add offset to the starting indexes so that indexes now create a span offsets = np.arange(mask_length)[None, None, :] offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape( batch_size, max_num_masked_span * mask_length ) spec_aug_mask_idxs = spec_aug_mask_idxs + offsets # ensure that we cannot have indices larger than sequence_length if spec_aug_mask_idxs.max() > sequence_length - 1: spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1 # scatter indices to mask np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1) return spec_aug_mask # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->SEW class SEWNoLayerNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->SEW class SEWLayerNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = hidden_states.transpose(-2, -1) hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states.transpose(-2, -1) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->SEW class SEWGroupNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.activation = ACT2FN[config.feat_extract_activation] 
self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True) def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states class SEWPositionalConvEmbedding(nn.Module): def __init__(self, config): super().__init__() self.conv = nn.Conv1d( config.hidden_size, config.hidden_size, kernel_size=config.num_conv_pos_embeddings, padding=config.num_conv_pos_embeddings // 2, groups=config.num_conv_pos_embedding_groups, stride=config.squeeze_factor, ) if is_deepspeed_zero3_enabled(): import deepspeed with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0): self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2) deepspeed.zero.register_external_parameter(self, self.conv.weight_v) deepspeed.zero.register_external_parameter(self, self.conv.weight_g) else: self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2) self.padding = SEWSamePadLayer(config.num_conv_pos_embeddings) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.padding(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->SEW class SEWSamePadLayer(nn.Module): def __init__(self, num_conv_pos_embeddings): super().__init__() self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0 def forward(self, hidden_states): if self.num_pad_remove > 0: hidden_states = hidden_states[:, :, : -self.num_pad_remove] return hidden_states class SEWUpsampling(nn.Module): def __init__(self, config): super().__init__() self.projection = nn.Linear(config.hidden_size, config.hidden_size * config.squeeze_factor) self.activation = ACT2FN[config.feat_extract_activation] self.squeeze_factor = config.squeeze_factor def forward(self, hidden_states): hidden_states = self.projection(hidden_states) hidden_states = self.activation(hidden_states) if self.squeeze_factor > 1: # transform embedding channels to sequence length bsz, src_len, src_embed_dim = hidden_states.size() tgt_len = src_len * self.squeeze_factor tgt_embed_dim = src_embed_dim // self.squeeze_factor hidden_states = hidden_states.reshape(bsz, src_len, self.squeeze_factor, tgt_embed_dim) hidden_states = hidden_states.reshape(bsz, tgt_len, tgt_embed_dim) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->SEW class SEWFeatureEncoder(nn.Module): """Construct the features from raw audio waveform""" def __init__(self, config): super().__init__() if config.feat_extract_norm == "group": conv_layers = [SEWGroupNormConvLayer(config, layer_id=0)] + [ SEWNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1) ] elif config.feat_extract_norm == "layer": conv_layers = [SEWLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)] else: raise ValueError( f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']" ) self.conv_layers = nn.ModuleList(conv_layers) self.gradient_checkpointing = False self._requires_grad = True def _freeze_parameters(self): for param in self.parameters(): param.requires_grad = False self._requires_grad = False def forward(self, input_values): hidden_states = input_values[:, None] # make 
sure hidden_states require grad for gradient_checkpointing
        if self._requires_grad and self.training:
            hidden_states.requires_grad = True

        for conv_layer in self.conv_layers:
            if self._requires_grad and self.gradient_checkpointing and self.training:
                hidden_states = self._gradient_checkpointing_func(
                    conv_layer.__call__,
                    hidden_states,
                )
            else:
                hidden_states = conv_layer(hidden_states)

        return hidden_states


class SEWFeatureExtractor(SEWFeatureEncoder):
    def __init__(self, config):
        super().__init__(config)
        warnings.warn(
            f"The class `{self.__class__.__name__}` has been deprecated "
            "and will be removed in Transformers v5. "
            f"Use `{self.__class__.__bases__[0].__name__}` instead.",
            FutureWarning,
        )


# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->SEW
class SEWAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        is_causal: bool = False,
        config: Optional[SEWConfig] = None,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        self.config = config

        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.is_causal = is_causal

        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        bsz, tgt_len, _ = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        # `past_key_value[0].shape[2] == key_value_states.shape[1]`
        # is checking that the `sequence_length` of the `past_key_value` is the same as
        # the provided `key_value_states` to support prefix tuning
        if (
            is_cross_attention
            and past_key_value is not None
            and past_key_value[0].shape[2] == key_value_states.shape[1]
        ):
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1,
bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value # Copied from transformers.models.bart.modeling_bart.BartFlashAttention2 with Bart->SEW class SEWFlashAttention2(SEWAttention): """ SEW flash attention module. This module inherits from `SEWAttention` as the weights of the module stays untouched. 
The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def _reshape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim) def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: # SEWFlashAttention2 attention does not support output_attentions if output_attentions: raise ValueError("SEWFlashAttention2 attention does not support output_attentions") # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, q_len, _ = hidden_states.size() # get query proj query_states = self._reshape(self.q_proj(hidden_states), -1, bsz) # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0].transpose(1, 2) value_states = past_key_value[1].transpose(1, 2) elif is_cross_attention: # cross_attentions key_states = self._reshape(self.k_proj(key_value_states), -1, bsz) value_states = self._reshape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0].transpose(1, 2), key_states], dim=1) value_states = torch.cat([past_key_value[1].transpose(1, 2), value_states], dim=1) else: # self_attention key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concatenate previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states.transpose(1, 2), value_states.transpose(1, 2)) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] # In PEFT, usually we cast the layer norms in float32 for training stability reasons, # therefore the input hidden states get silently cast to float32. Hence, we need to # cast them back to the correct dtype just to be sure everything works as expected. # This might slow down training & inference, so it is recommended not to cast the LayerNorms # to fp32. (LlamaRMSNorm handles it correctly) input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once( f"The input hidden states seem to be silently cast to float32. This might be related to" f" the fact that you have upcasted embedding or layer norm layers to float32. We will cast the input back to" f" {target_dtype}." ) query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = self._flash_attention_forward( query_states, key_states, value_states, attention_mask, q_len, dropout=self.dropout ) attn_output = attn_output.reshape(bsz, q_len, -1) attn_output = self.out_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward def _flash_attention_forward( self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None ): """ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token, it first unpads the input, then computes the attention scores and pads the final attention scores. Args: query_states (`torch.Tensor`): Input query states to be passed to the Flash Attention API key_states (`torch.Tensor`): Input key states to be passed to the Flash Attention API value_states (`torch.Tensor`): Input value states to be passed to the Flash Attention API attention_mask (`torch.Tensor`): The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the position of padding tokens and 1 for the position of non-padding tokens. dropout (`float`): Attention dropout softmax_scale (`float`, *optional*): The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim). """ if not self._flash_attn_uses_top_left_mask: causal = self.is_causal else: # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
causal = self.is_causal and query_length != 1 # Contains at least one padding token in the sequence if attention_mask is not None: batch_size = query_states.shape[0] query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( query_states, key_states, value_states, attention_mask, query_length ) cu_seqlens_q, cu_seqlens_k = cu_seq_lens max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens attn_output_unpad = flash_attn_varlen_func( query_states, key_states, value_states, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_in_batch_q, max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, causal=causal, ) attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) else: attn_output = flash_attn_func( query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal ) return attn_output # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape key_layer = index_first_axis( key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k ) value_layer = index_first_axis( value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k ) if query_length == kv_seq_len: query_layer = index_first_axis( query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k ) cu_seqlens_q = cu_seqlens_k max_seqlen_in_batch_q = max_seqlen_in_batch_k indices_q = indices_k elif query_length == 1: max_seqlen_in_batch_q = 1 cu_seqlens_q = torch.arange( batch_size + 1, dtype=torch.int32, device=query_layer.device ) # There is a memcpy here, that is very bad. indices_q = cu_seqlens_q[:-1] query_layer = query_layer.squeeze(1) else: # The -q_len: slice assumes left padding. attention_mask = attention_mask[:, -query_length:] query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) return ( query_layer, key_layer, value_layer, indices_q, (cu_seqlens_q, cu_seqlens_k), (max_seqlen_in_batch_q, max_seqlen_in_batch_k), ) class SEWSdpaAttention(SEWAttention): # Copied from transformers.models.bart.modeling_bart.BartSdpaAttention.forward with Bart->SEW def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" if output_attentions or layer_head_mask is not None: # TODO: Improve this warning with e.g. `model.config._attn_implementation = "manual"` once this is implemented. logger.warning_once( "SEWModel is using SEWSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` or `layer_head_mask` not None. Falling back to the manual attention" ' implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
) return super().forward( hidden_states, key_value_states=key_value_states, past_key_value=past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) query_states = self._shape(query_states, tgt_len, bsz) # NOTE: SDPA with memory-efficient backend is currently (torch==2.1.2) bugged when using non-contiguous inputs and a custom attn_mask, # but we are fine here as `_shape` do call `.contiguous()`. Reference: https://github.com/pytorch/pytorch/issues/112577 attn_output = torch.nn.functional.scaled_dot_product_attention( query_states, key_states, value_states, attn_mask=attention_mask, dropout_p=self.dropout if self.training else 0.0, # The tgt_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case tgt_len == 1. is_causal=self.is_causal and attention_mask is None and tgt_len > 1, ) if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. 
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, None, past_key_value SEW_ATTENTION_CLASSES = { "eager": SEWAttention, "sdpa": SEWSdpaAttention, "flash_attention_2": SEWFlashAttention2, } # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->SEW class SEWFeedForward(nn.Module): def __init__(self, config): super().__init__() self.intermediate_dropout = nn.Dropout(config.activation_dropout) self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size) self.output_dropout = nn.Dropout(config.hidden_dropout) def forward(self, hidden_states): hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states) hidden_states = self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayer with Wav2Vec2->SEW, WAV2VEC2->SEW class SEWEncoderLayer(nn.Module): def __init__(self, config): super().__init__() self.attention = SEW_ATTENTION_CLASSES[config._attn_implementation]( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, ) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = SEWFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, attention_mask=None, output_attentions=False): attn_residual = hidden_states hidden_states, attn_weights, _ = self.attention( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions ) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states + self.feed_forward(hidden_states) hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class SEWEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = SEWPositionalConvEmbedding(config) self.pool = nn.AvgPool1d(config.squeeze_factor, config.squeeze_factor) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList([SEWEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.upsample = SEWUpsampling(config) self.gradient_checkpointing = False self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" def forward( self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: if self._use_flash_attention_2: # make sure padded tokens output 0 hidden_states[~attention_mask] = 0.0 # 2d mask is passed through the layers attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None else: # make 
sure padded tokens output 0 hidden_states[~attention_mask] = 0.0 input_lengths = (attention_mask.long()).sum(-1) # apply pooling formula to get real output_lengths output_lengths = input_lengths // self.config.squeeze_factor max_encoder_length = hidden_states.shape[1] // self.config.squeeze_factor attention_ids = ( torch.arange(0, max_encoder_length, device=output_lengths.device) .view(1, -1) .expand(output_lengths.shape[0], -1) ) attention_mask = (attention_ids < output_lengths.view(-1, 1)).long() # extend attention_mask attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype) attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min attention_mask = attention_mask.expand( attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1] ) n_input_timesteps = hidden_states.shape[1] hidden_states = hidden_states.transpose(1, 2) position_embeddings = self.pos_conv_embed(hidden_states) pooled_hidden_states = self.pool(hidden_states) min_length = min(position_embeddings.size(-1), pooled_hidden_states.size(-1)) hidden_states = pooled_hidden_states[..., :min_length] + position_embeddings[..., :min_length] hidden_states = hidden_states.transpose(1, 2) hidden_states = self.layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled() for layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False if not skip_the_layer or deepspeed_zero3_is_enabled: # under deepspeed zero3 all gpus must run in sync if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer.__call__, hidden_states, attention_mask, output_attentions, ) else: layer_outputs = layer( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions ) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) hidden_states = self.upsample(hidden_states) if hidden_states.shape[1] < n_input_timesteps: hidden_states = nn.functional.pad(hidden_states, (0, 0, 0, n_input_timesteps - hidden_states.shape[1])) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class SEWPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = SEWConfig base_model_prefix = "sew" main_input_name = "input_values" supports_gradient_checkpointing = True _supports_flash_attn_2 = True _supports_sdpa = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, SEWPositionalConvEmbedding): nn.init.normal_( module.conv.weight, mean=0, std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)), ) nn.init.constant_(module.conv.bias, 0) elif isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv1d): if is_deepspeed_zero3_enabled(): import deepspeed if hasattr(module, "weight_v") and hasattr(module, "weight_g"): with deepspeed.zero.GatheredParameters([module.weight_v, module.weight_g], modifier_rank=0): nn.init.kaiming_normal_(module.weight.data) else: with deepspeed.zero.GatheredParameters(module.weight, modifier_rank=0): nn.init.kaiming_normal_(module.weight.data) else: nn.init.kaiming_normal_(module.weight.data) if isinstance(module, (nn.Linear, nn.Conv1d)) and module.bias is not None: module.bias.data.zero_() def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]): """ Computes the output length of the convolutional layers """ def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) return input_lengths def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor): output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long) batch_size = attention_mask.shape[0] attention_mask = torch.zeros( (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device ) # these two operations makes sure that all values before the output lengths idxs are attended to attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool() return attention_mask SEW_START_DOCSTRING = r""" SEW was proposed in [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.). This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`SEWConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ SEW_INPUTS_DOCSTRING = r""" Args: input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details. attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare SEW Model transformer outputting raw hidden-states without any specific head on top.", SEW_START_DOCSTRING, ) class SEWModel(SEWPreTrainedModel): def __init__(self, config: SEWConfig): super().__init__(config) self.config = config self.feature_extractor = SEWFeatureEncoder(config) self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps) self.project_features = config.conv_dim[-1] != config.hidden_size if self.project_features: self.feature_projection = nn.Linear(config.conv_dim[-1], config.hidden_size) self.feature_dropout = nn.Dropout(config.feat_proj_dropout) if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0: self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_()) self.encoder = SEWEncoder(config) # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states def _mask_hidden_states( self, hidden_states: torch.FloatTensor, mask_time_indices: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, ): """ Masks extracted features along time axis and/or along feature axis according to [SpecAugment](https://arxiv.org/abs/1904.08779). 
""" # `config.apply_spec_augment` can set masking to False if not getattr(self.config, "apply_spec_augment", True): return hidden_states # generate indices & apply SpecAugment along time axis batch_size, sequence_length, hidden_size = hidden_states.size() if mask_time_indices is not None: # apply SpecAugment along time axis with given mask_time_indices hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) elif self.config.mask_time_prob > 0 and self.training: mask_time_indices = _compute_mask_indices( (batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, attention_mask=attention_mask, min_masks=self.config.mask_time_min_masks, ) mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool) hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) if self.config.mask_feature_prob > 0 and self.training: # generate indices & apply SpecAugment along feature axis mask_feature_indices = _compute_mask_indices( (batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, min_masks=self.config.mask_feature_min_masks, ) mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool) mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1) hidden_states[mask_feature_indices] = 0 return hidden_states @add_start_docstrings_to_model_forward(SEW_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, mask_time_indices: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict extract_features = self.feature_extractor(input_values) extract_features = extract_features.transpose(1, 2) extract_features = self.layer_norm(extract_features) if self.project_features: extract_features = self.feature_projection(extract_features) hidden_states = self.feature_dropout(extract_features) if attention_mask is not None: # compute reduced attention_mask corresponding to feature vectors attention_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask) hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices) encoder_outputs = self.encoder( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = encoder_outputs[0] if not return_dict: return (hidden_states,) + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings( """SEW Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""", SEW_START_DOCSTRING, ) # Copied from 
transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->SEW, wav2vec2->sew, WAV_2_VEC_2->SEW class SEWForCTC(SEWPreTrainedModel): def __init__(self, config, target_lang: Optional[str] = None): super().__init__(config) self.sew = SEWModel(config) self.dropout = nn.Dropout(config.final_dropout) self.target_lang = target_lang if config.vocab_size is None: raise ValueError( f"You are trying to instantiate {self.__class__} with a configuration that " "does not define the vocabulary size of the language model head. Please " "instantiate the model as follows: `SEWForCTC.from_pretrained(..., vocab_size=vocab_size)`. " "or define `vocab_size` of your model's configuration." ) output_hidden_size = ( config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size ) self.lm_head = nn.Linear(output_hidden_size, config.vocab_size) # Initialize weights and apply final processing self.post_init() def tie_weights(self): """ This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when passing `target_lang=...` to `from_pretrained(...)`. This method is **not** supposed to be called by the user and is prone to be changed in the future. """ # Note that `tie_weights` is usually used to tie input and output embedding weights. The method is re-purposed to # correctly load adapter layers for SEW so that we do not have to introduce a new API to # [`PreTrainedModel`]. While slightly hacky, SEW never has to tie input and output embeddings, so that it is # ok to repurpose this function here. target_lang = self.target_lang if target_lang is not None and getattr(self.config, "adapter_attn_dim", None) is None: raise ValueError(f"Cannot pass `target_lang`: {target_lang} if `config.adapter_attn_dim` is not defined.") elif target_lang is None and getattr(self.config, "adapter_attn_dim", None) is not None: logger.info("By default `target_lang` is set to 'eng'.") elif target_lang is not None: self.load_adapter(target_lang, force_load=True) def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.sew.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. 
""" for param in self.sew.parameters(): param.requires_grad = False @add_start_docstrings_to_model_forward(SEW_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC, expected_output=_CTC_EXPECTED_OUTPUT, expected_loss=_CTC_EXPECTED_LOSS, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, CausalLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*): Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.sew( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states) logits = self.lm_head(hidden_states) loss = None if labels is not None: if labels.max() >= self.config.vocab_size: raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}") # retrieve loss input_lengths from attention_mask attention_mask = ( attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long) ) input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long) # assuming that padded tokens are filled with -100 # when not being attended to labels_mask = labels >= 0 target_lengths = labels_mask.sum(-1) flattened_targets = labels.masked_select(labels_mask) # ctc_loss doesn't support fp16 log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1) with torch.backends.cudnn.flags(enabled=False): loss = nn.functional.ctc_loss( log_probs, flattened_targets, input_lengths, target_lengths, blank=self.config.pad_token_id, reduction=self.config.ctc_loss_reduction, zero_infinity=self.config.ctc_zero_infinity, ) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return CausalLMOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions ) @add_start_docstrings( """ SEW Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting. 
""", SEW_START_DOCSTRING, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification with Wav2Vec2->SEW, wav2vec2->sew, WAV_2_VEC_2->SEW class SEWForSequenceClassification(SEWPreTrainedModel): def __init__(self, config): super().__init__(config) if hasattr(config, "add_adapter") and config.add_adapter: raise ValueError( "Sequence classification does not support the use of SEW adapters (config.add_adapter=True)" ) self.sew = SEWModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) # Initialize weights and apply final processing self.post_init() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. " "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.sew.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.sew.parameters(): param.requires_grad = False @add_start_docstrings_to_model_forward(SEW_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_SEQ_CLASS_CHECKPOINT, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_SEQ_CLASS_EXPECTED_OUTPUT, expected_loss=_SEQ_CLASS_EXPECTED_LOSS, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.sew( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) if attention_mask is None: pooled_output = hidden_states.mean(dim=1) else: padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask) hidden_states[~padding_mask] = 0.0 pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1) logits = self.classifier(pooled_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/sew/configuration_sew.py
# coding=utf-8 # Copyright 2021 ASAPP Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ SEW model configuration""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) from ..deprecated._archive_maps import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 class SEWConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SEWModel`]. It is used to instantiate a SEW model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SEW [asapp/sew-tiny-100k](https://huggingface.co/asapp/sew-tiny-100k) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32): Vocabulary size of the SEW model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`SEW`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. squeeze_factor (`int`, *optional*, defaults to 2): Sequence length downsampling factor after the encoder and upsampling factor after the transformer. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. activation_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for activations inside the fully connected layer. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. final_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for the final projection layer of [`SEWForCTC`]. layerdrop (`float`, *optional*, defaults to 0.1): The LayerDrop probability. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. 
feat_extract_norm (`str`, *optional*, defaults to `"group"`): The norm to be applied to 1D convolutional layers in the feature encoder. One of `"group"` for group normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D convolutional layers. feat_proj_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for the output of the feature encoder. feat_extract_activation (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the 1D convolutional layers of the feature extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. conv_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512)`): A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers. conv_stride (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)`): A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*. conv_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1)`): A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The length of *conv_kernel* defines the number of convolutional layers and has to match the length of *conv_dim*. conv_bias (`bool`, *optional*, defaults to `False`): Whether the 1D convolutional layers have a bias. num_conv_pos_embeddings (`int`, *optional*, defaults to 128): Number of convolutional positional embeddings. Defines the kernel size of the 1D convolutional positional embeddings layer. num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16): Number of groups of the 1D convolutional positional embeddings layer. apply_spec_augment (`bool`, *optional*, defaults to `True`): Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see [SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition](https://arxiv.org/abs/1904.08779). mask_time_prob (`float`, *optional*, defaults to 0.05): Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking procedure generates `mask_time_prob*len(time_axis)/mask_time_length` independent masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector span to be masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment` is `True`. mask_time_length (`int`, *optional*, defaults to 10): Length of vector span along the time axis. mask_time_min_masks (`int`, *optional*, defaults to 2): The minimum number of masks of length `mask_time_length` generated along the time axis, each time step, irrespective of `mask_time_prob`. Only relevant if `mask_time_prob*len(time_axis)/mask_time_length < mask_time_min_masks`. mask_feature_prob (`float`, *optional*, defaults to 0.0): Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked.
The masking procedure generates `mask_feature_prob*len(feature_axis)/mask_feature_length` independent masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment` is `True`. mask_feature_length (`int`, *optional*, defaults to 10): Length of vector span along the feature axis. mask_feature_min_masks (`int`, *optional*, defaults to 0): The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time step, irrespective of `mask_feature_prob`. Only relevant if `mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks`. ctc_loss_reduction (`str`, *optional*, defaults to `"mean"`): Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an instance of [`SEWForCTC`]. ctc_zero_infinity (`bool`, *optional*, defaults to `False`): Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance of [`SEWForCTC`]. use_weighted_layer_sum (`bool`, *optional*, defaults to `False`): Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an instance of [`SEWForSequenceClassification`]. classifier_proj_size (`int`, *optional*, defaults to 256): Dimensionality of the projection before token mean-pooling for classification. Example: ```python >>> from transformers import SEWConfig, SEWModel >>> # Initializing a SEW asapp/sew-tiny-100k style configuration >>> configuration = SEWConfig() >>> # Initializing a model (with random weights) from the asapp/sew-tiny-100k style configuration >>> model = SEWModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "sew" def __init__( self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs, ): super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id) self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_activation = feat_extract_activation self.conv_dim = list(conv_dim) self.conv_stride = list(conv_stride) self.conv_kernel = list(conv_kernel) self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = 
num_conv_pos_embedding_groups self.num_feat_extract_layers = len(self.conv_dim) self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.squeeze_factor = squeeze_factor self.hidden_act = hidden_act self.num_attention_heads = num_attention_heads self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.feat_proj_dropout = feat_proj_dropout self.final_dropout = final_dropout self.layerdrop = layerdrop self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.vocab_size = vocab_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. " "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, " f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) " f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`." ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 self.apply_spec_augment = apply_spec_augment self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.mask_time_min_masks = mask_time_min_masks self.mask_feature_prob = mask_feature_prob self.mask_feature_length = mask_feature_length self.mask_feature_min_masks = mask_feature_min_masks # ctc loss self.ctc_loss_reduction = ctc_loss_reduction self.ctc_zero_infinity = ctc_zero_infinity # sequence classification self.use_weighted_layer_sum = use_weighted_layer_sum self.classifier_proj_size = classifier_proj_size @property def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1)
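Because the three convolutional tuples must stay aligned and `inputs_to_logits_ratio` is just the product of the strides, a small sketch makes the validation in `__init__` concrete. The layer sizes below are illustrative only, not a recommended architecture.

```python
from transformers import SEWConfig

# Three aligned conv layers; the strides multiply to the samples-per-frame ratio.
config = SEWConfig(conv_dim=(64, 128, 256), conv_kernel=(10, 3, 3), conv_stride=(5, 2, 2))
assert config.inputs_to_logits_ratio == 5 * 2 * 2  # functools.reduce over conv_stride

# Mismatched tuple lengths trip the ValueError raised in __init__ above.
try:
    SEWConfig(conv_dim=(64, 128), conv_kernel=(10, 3, 3), conv_stride=(5, 2, 2))
except ValueError as err:
    print(err)
```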
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/sew/__init__.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_sew"] = [ "SEW_PRETRAINED_MODEL_ARCHIVE_LIST", "SEWForCTC", "SEWForSequenceClassification", "SEWModel", "SEWPreTrainedModel", ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
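The `_LazyModule` indirection above keeps the package import cheap: submodules are only materialised on first attribute access. A rough sketch of that behaviour follows; the exact module names registered in `sys.modules` are an assumption based on how `importlib` records them, and details may vary across `transformers` versions.

```python
import sys

from transformers.models import sew  # replaced by a _LazyModule at import time

print(type(sew).__name__)  # _LazyModule
print("transformers.models.sew.configuration_sew" in sys.modules)  # likely False so far

_ = sew.SEWConfig  # first attribute access triggers the real import
print("transformers.models.sew.configuration_sew" in sys.modules)  # True
```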
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/sew/convert_sew_original_pytorch_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert SEW checkpoint.""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, logging, ) logging.set_verbosity_info() logger = logging.get_logger(__name__) MAPPING = { "post_extract_proj": "feature_projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.upsample.0": "encoder.upsample.projection", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def set_recursively(hf_pointer, key, value, full_name, weight_type): for attribute in key.split("."): hf_pointer = getattr(hf_pointer, attribute) if weight_type is not None: hf_shape = getattr(hf_pointer, weight_type).shape else: hf_shape = hf_pointer.shape assert hf_shape == value.shape, ( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": hf_pointer.weight.data = value elif weight_type == "weight_g": hf_pointer.weight_g.data = value elif weight_type == "weight_v": hf_pointer.weight_v.data = value elif weight_type == "bias": hf_pointer.bias.data = value else: hf_pointer.data = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.") def recursively_load_weights(fairseq_model, hf_model, is_finetuned): unused_weights = [] fairseq_dict = fairseq_model.state_dict() feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): is_used = False if "conv_layers" in name: load_conv_layer( name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", ) is_used = True else: for key, mapped_key in MAPPING.items(): mapped_key = "sew." 
+ mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: is_used = True if "*" in mapped_key: layer_index = name.split(key)[0].split(".")[-2] mapped_key = mapped_key.replace("*", layer_index) if "weight_g" in name: weight_type = "weight_g" elif "weight_v" in name: weight_type = "weight_v" elif "weight" in name: weight_type = "weight" elif "bias" in name: weight_type = "bias" else: weight_type = None set_recursively(hf_model, mapped_key, value, name, weight_type) continue if not is_used: unused_weights.append(name) logger.warning(f"Unused weights: {unused_weights}") def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm): name = full_name.split("conv_layers.")[-1] items = name.split(".") layer_id = int(items[0]) type_id = int(items[1]) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) feature_extractor.conv_layers[layer_id].conv.bias.data = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) feature_extractor.conv_layers[layer_id].conv.weight.data = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." 
) feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") else: unused_weights.append(full_name) def convert_config(model, is_finetuned): config = SEWConfig() if is_finetuned: fs_config = model.w2v_encoder.w2v_model.cfg else: fs_config = model.cfg config.conv_bias = fs_config.conv_bias conv_layers = eval(fs_config.conv_feature_layers) config.conv_dim = [x[0] for x in conv_layers] config.conv_kernel = [x[1] for x in conv_layers] config.conv_stride = [x[2] for x in conv_layers] config.feat_extract_activation = "gelu" config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group" config.final_dropout = 0.0 config.hidden_act = fs_config.activation_fn.name config.hidden_size = fs_config.encoder_embed_dim config.initializer_range = 0.02 config.intermediate_size = fs_config.encoder_ffn_embed_dim config.layer_norm_eps = 1e-5 config.layerdrop = fs_config.encoder_layerdrop config.num_attention_heads = fs_config.encoder_attention_heads config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups config.num_conv_pos_embeddings = fs_config.conv_pos config.num_feat_extract_layers = len(conv_layers) config.num_hidden_layers = fs_config.encoder_layers config.squeeze_factor = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: fs_config = model.cfg config.final_dropout = fs_config.final_dropout config.layerdrop = fs_config.layerdrop config.activation_dropout = fs_config.activation_dropout config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 config.attention_dropout = fs_config.attention_dropout config.feat_proj_dropout = fs_config.dropout_input config.hidden_dropout = fs_config.dropout config.mask_feature_length = fs_config.mask_channel_length config.mask_feature_prob = fs_config.mask_channel_prob config.mask_time_length = fs_config.mask_length config.mask_time_prob = fs_config.mask_prob config.feature_extractor_type = "Wav2Vec2FeatureExtractor" config.tokenizer_class = "Wav2Vec2CTCTokenizer" return config @torch.no_grad() def convert_sew_checkpoint( checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True ): """ Copy/paste/tweak model's weights to transformers design. 
""" if is_finetuned: model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])} ) else: model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path]) if config_path is not None: config = SEWConfig.from_pretrained(config_path) else: config = convert_config(model[0], is_finetuned) model = model[0].eval() return_attention_mask = True if config.feat_extract_norm == "layer" else False feature_extractor = Wav2Vec2FeatureExtractor( feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, ) if is_finetuned: if dict_path: target_dict = Dictionary.load(dict_path) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq target_dict.indices[target_dict.bos_word] = target_dict.pad_index target_dict.indices[target_dict.pad_word] = target_dict.bos_index config.bos_token_id = target_dict.pad_index config.pad_token_id = target_dict.bos_index config.eos_token_id = target_dict.eos_index config.vocab_size = len(target_dict.symbols) vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json") if not os.path.isdir(pytorch_dump_folder_path): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path)) return os.makedirs(pytorch_dump_folder_path, exist_ok=True) with open(vocab_path, "w", encoding="utf-8") as vocab_handle: json.dump(target_dict.indices, vocab_handle) tokenizer = Wav2Vec2CTCTokenizer( vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, ) processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer) processor.save_pretrained(pytorch_dump_folder_path) hf_model = SEWForCTC(config) else: hf_model = SEWModel(config) feature_extractor.save_pretrained(pytorch_dump_folder_path) recursively_load_weights(model, hf_model, is_finetuned) hf_model.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) args = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/cpmant/configuration_cpmant.py
# coding=utf-8
# Copyright 2022 The OpenBMB Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" CPMAnt model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class CpmAntConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`CpmAntModel`]. It is used to instantiate a
    CPMAnt model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the CPMAnt
    [openbmb/cpm-ant-10b](https://huggingface.co/openbmb/cpm-ant-10b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30720):
            Vocabulary size of the CPMAnt model. Defines the number of different tokens that can be represented by the
            `input` passed when calling [`CpmAntModel`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the encoder layers.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads in the Transformer encoder.
        dim_head (`int`, *optional*, defaults to 128):
            Dimension of attention heads for each attention layer in the Transformer encoder.
        dim_ff (`int`, *optional*, defaults to 10240):
            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 48):
            Number of layers of the Transformer encoder.
        dropout_p (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings and encoder.
        position_bias_num_buckets (`int`, *optional*, defaults to 512):
            The number of position_bias buckets.
        position_bias_max_distance (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        init_std (`float`, *optional*, defaults to 1.0):
            Initialize parameters with std = init_std.
        prompt_types (`int`, *optional*, defaults to 32):
            The number of prompt types.
        prompt_length (`int`, *optional*, defaults to 32):
            The length of the prompt.
        segment_types (`int`, *optional*, defaults to 32):
            The number of segment types.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether to use cache.
    Example:

    ```python
    >>> from transformers import CpmAntModel, CpmAntConfig

    >>> # Initializing a CPMAnt cpm-ant-10b style configuration
    >>> configuration = CpmAntConfig()

    >>> # Initializing a model from the cpm-ant-10b style configuration
    >>> model = CpmAntModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "cpmant"

    def __init__(
        self,
        vocab_size: int = 30720,
        hidden_size: int = 4096,
        num_attention_heads: int = 32,
        dim_head: int = 128,
        dim_ff: int = 10240,
        num_hidden_layers: int = 48,
        dropout_p: float = 0.0,
        position_bias_num_buckets: int = 512,
        position_bias_max_distance: int = 2048,
        eps: float = 1e-6,
        init_std: float = 1.0,
        prompt_types: int = 32,
        prompt_length: int = 32,
        segment_types: int = 32,
        use_cache: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.prompt_types = prompt_types
        self.prompt_length = prompt_length
        self.segment_types = segment_types
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.dim_head = dim_head
        self.dim_ff = dim_ff
        self.num_hidden_layers = num_hidden_layers
        self.position_bias_num_buckets = position_bias_num_buckets
        self.position_bias_max_distance = position_bias_max_distance
        self.dropout_p = dropout_p
        self.eps = eps
        self.use_cache = use_cache
        self.vocab_size = vocab_size
        self.init_std = init_std
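# A small illustrative sketch (not part of the original file): the defaults above
# describe the 10B checkpoint, so for quick local experiments one would typically
# shrink every dimension. The values below are arbitrary choices, not an official
# test configuration.
#
# tiny_config = CpmAntConfig(
#     vocab_size=512,
#     hidden_size=64,
#     num_attention_heads=4,
#     dim_head=16,
#     dim_ff=128,
#     num_hidden_layers=2,
# )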
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/cpmant/modeling_cpmant.py
# coding=utf-8
# Copyright 2022 The OpenBMB Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch CPMAnt"""


import math
from typing import List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_cpmant import CpmAntConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "openbmb/cpm-ant-10b"
_CONFIG_FOR_DOC = "CpmAntConfig"


from ..deprecated._archive_maps import CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402


class CpmAntLayerNorm(nn.Module):
    """
    We use Root Mean Square (RMS) Layer Normalization, please see https://arxiv.org/abs/1910.07467 for details.
    """

    def __init__(self, config: CpmAntConfig):
        super().__init__()

        self.eps = config.eps
        self.dim_norm = config.hidden_size
        self.weight = nn.Parameter(torch.empty(config.hidden_size))

    def forward(self, hidden_states: torch.Tensor):
        """
        Args:
            hidden_states (`torch.Tensor` of shape `(batch, seq_len, dim_in)`)
        """
        if hidden_states.size(-1) != self.dim_norm:
            raise AssertionError("hidden_states.size(-1) != self.dim_norm")
        old_dtype = hidden_states.dtype
        variance = hidden_states.to(torch.float32).pow(2).mean(dim=-1, keepdim=True)
        hidden_states = (hidden_states * torch.rsqrt(variance + self.eps)).to(old_dtype) * self.weight
        return hidden_states


class CpmAntAttention(nn.Module):
    def __init__(self, config: CpmAntConfig):
        super().__init__()
        self.dim_model = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.dim_head = config.dim_head

        self.project_q = nn.Linear(self.dim_model, self.num_heads * self.dim_head, bias=False)
        self.project_k = nn.Linear(self.dim_model, self.num_heads * self.dim_head, bias=False)
        self.project_v = nn.Linear(self.dim_model, self.num_heads * self.dim_head, bias=False)

        self.attention_out = nn.Linear(self.num_heads * self.dim_head, self.dim_model, bias=False)

        self.softmax = torch.nn.Softmax(dim=-1)

        if config.dropout_p is not None:
            self.dropout = torch.nn.Dropout(p=config.dropout_p)
        else:
            self.dropout = None

    def forward(
        self,
        hidden_q: torch.Tensor,
        hidden_kv: torch.Tensor,
        attention_mask: torch.BoolTensor,
        position_bias: torch.Tensor,
        output_attentions: Optional[bool] = False,
        past_key_values: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        use_cache: Optional[bool] = None,
    ):
        """
        Args:
            hidden_q (`torch.Tensor`):
                Input of the transformer block (self-attention block). It can be the raw embedding of a batch of sequences.
            hidden_kv (`torch.Tensor` of shape `(batch, len_k, dim_model)`):
                Tensor from which the *key* and *value* projections are computed, of shape `(batch, len_k, dim_model)`
            attention_mask (`torch.Tensor` of shape `(batch, len_seq, len_seq)`):
                Mask that keeps invalid positions from participating in the self-attention calculation.
            position_bias (`torch.Tensor` of shape `(batch, len_seq, len_seq)`):
                Provide positional information to self-attention block.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers.
            past_key_values (`Tuple[torch.Tensor, torch.Tensor]`, *optional*):
                Cached past key and value projection states.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
        """
        batch_size = hidden_q.size(0)
        len_q = hidden_q.size(1)
        len_k = hidden_kv.size(1)

        query = self.project_q(hidden_q)
        key = self.project_k(hidden_kv)
        value = self.project_v(hidden_kv)

        query = query.view(batch_size, len_q, self.num_heads, self.dim_head).permute(0, 2, 1, 3)
        key = key.view(batch_size, len_k, self.num_heads, self.dim_head).permute(0, 2, 1, 3)
        value = value.view(batch_size, len_k, self.num_heads, self.dim_head).permute(0, 2, 1, 3)

        if past_key_values is not None:
            key = torch.cat([past_key_values[0], key], dim=-2)
            value = torch.cat([past_key_values[1], value], dim=-2)
            len_k = key.size(-2)

        # (batch_size, num_heads, len_q, dim_head) @ (batch_size, num_heads, dim_head, len_k) -> (batch_size, num_heads, len_q, len_k)
        score = torch.matmul(query, key.transpose(-1, -2)) / math.sqrt(self.dim_head)
        score = score + position_bias

        score = torch.masked_fill(
            score,
            attention_mask.view(batch_size, 1, len_q, len_k) == torch.tensor(False),
            torch.scalar_tensor(float("-inf"), device=score.device, dtype=score.dtype),
        )
        score = self.softmax(score)

        score = torch.masked_fill(
            score,
            attention_mask.view(batch_size, 1, len_q, len_k) == torch.tensor(False),
            torch.scalar_tensor(0, device=score.device, dtype=score.dtype),
        )
        if output_attentions:
            attn_weights = score
        else:
            attn_weights = None

        if self.dropout is not None:
            score = self.dropout(score)

        # (batch_size, num_heads, len_q, len_k) @ (batch_size, num_heads, len_k, dim_head) -> (batch_size, num_heads, len_q, dim_head)
        score = torch.matmul(score, value)

        score = score.view(batch_size, self.num_heads, len_q, self.dim_head).permute(0, 2, 1, 3)
        score = score.contiguous().view(batch_size, len_q, self.num_heads * self.dim_head)

        score = self.attention_out(score)

        past_key_values = None
        if use_cache:
            past_key_values = (key, value)

        return score, attn_weights, past_key_values


class CpmAntSelfAttentionBlock(nn.Module):
    def __init__(self, config: CpmAntConfig):
        super().__init__()
        self.layernorm_before_attention = CpmAntLayerNorm(config)
        self.self_attention = CpmAntAttention(config)
        if config.dropout_p:
            self.dropout = torch.nn.Dropout(config.dropout_p)
        else:
            self.dropout = None

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        position_bias: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
        past_key_values: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        use_cache: Optional[bool] = None,
    ):
        """
        Args:
            hidden_states (`torch.Tensor` of shape `(batch, len_seq, dim_model)`):
                Input of the transformer block (self-attention block). It can be the raw embedding of a batch of sequences.
            attention_mask (`torch.Tensor` of shape `(batch, len_seq, len_seq)`):
                Mask that keeps invalid positions from participating in the self-attention calculation.
position_bias (`torch.Tensor` of shape `(batch, len_seq, len_seq)`): Provide positional information to self-attention block. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. past_key_values (`Tuple(torch.FloatTensor)`, *optional*): Cached past key and value projection states. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ outputs = self.layernorm_before_attention(hidden_states) outputs = self.self_attention( outputs, outputs, attention_mask, position_bias, output_attentions, past_key_values, use_cache ) outputs, attn_weights, current_key_value = outputs if self.dropout is not None: outputs = self.dropout(outputs) hidden_states = hidden_states + outputs return hidden_states, attn_weights, current_key_value class CpmAntDenseGatedACT(nn.Module): def __init__(self, config: CpmAntConfig): super().__init__() self.w_0 = nn.Linear(config.hidden_size, config.dim_ff, bias=False) self.w_1 = nn.Linear(config.hidden_size, config.dim_ff, bias=False) self.act = torch.nn.GELU() def forward(self, hidden_states: torch.Tensor): """Transform an input tensor from one feature space to another via a nonlinear operation Args: hidden_states (`torch.Tensor` of shape `(batch, seq_len, dim_in)`) """ gate_score = self.act(self.w_0(hidden_states)) hidden_states = self.w_1(hidden_states) hidden_states = gate_score * hidden_states return hidden_states class CpmAntFeedForward(nn.Module): def __init__(self, config: CpmAntConfig): super().__init__() self.w_in = CpmAntDenseGatedACT(config) if config.dropout_p is not None: self.dropout = torch.nn.Dropout(config.dropout_p) else: self.dropout = None self.w_out = nn.Linear(config.dim_ff, config.hidden_size, bias=False) def forward(self, hidden_states: torch.Tensor): """ Args: hidden_states (`torch.Tensor` of shape `(batch, seq_len, dim_in)`) """ hidden_states = self.w_in(hidden_states) if self.dropout is not None: hidden_states = self.dropout(hidden_states) hidden_states = self.w_out(hidden_states) return hidden_states class CpmAntFFNBlock(nn.Module): def __init__(self, config: CpmAntConfig): super().__init__() self.layernorm_before_ffn = CpmAntLayerNorm(config) self.ffn = CpmAntFeedForward(config) if config.dropout_p: self.dropout = torch.nn.Dropout(config.dropout_p) else: self.dropout = None def forward( self, hidden_states: torch.Tensor, ): """ Args: hidden_states (`torch.Tensor` of shape `(batch, len_seq, dim_model)`): Hidden states before feed forward layer. 
""" ln_outputs = self.layernorm_before_ffn(hidden_states) outputs = self.ffn(ln_outputs) if self.dropout is not None: outputs = self.dropout(outputs) hidden_states = hidden_states + outputs return hidden_states class CpmAntTransformerBlock(nn.Module): def __init__(self, config: CpmAntConfig): super().__init__() self.self_att = CpmAntSelfAttentionBlock(config) self.ffn = CpmAntFFNBlock(config) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_bias: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, past_key_values: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, use_cache: Optional[bool] = None, ): """ Args: hidden_states (`torch.Tensor`): Input to the layer of shape `(batch, seq_len, dim_model)` attention_mask (`torch.Tensor`): Avoid invalid areas to participate in the calculation of shape `(batch, seq_len, seq_len)` position_bias (`torch.Tensor`): Provides position information to attention mechanism of shape `(num_heads, seq_len, seq_len)` output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. past_key_values (`Tuple[torch.Tensor, torch.Tensor])`, *optional*): Cached past key and value projection states use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ hidden_states = self.self_att( hidden_states, attention_mask=attention_mask, position_bias=position_bias, output_attentions=output_attentions, past_key_values=past_key_values, use_cache=use_cache, ) hidden_states, attn_weights, current_key_value = hidden_states hidden_states = self.ffn(hidden_states) return hidden_states, attn_weights, current_key_value class CpmAntEncoder(nn.Module): def __init__(self, config: CpmAntConfig): super().__init__() self.num_layers = config.num_hidden_layers self.layers = nn.ModuleList([CpmAntTransformerBlock(config) for ith in range(self.num_layers)]) self.output_layernorm = CpmAntLayerNorm(config) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_bias: torch.Tensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, past_key_values: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, use_cache: Optional[bool] = None, ): """ Args: hidden_states (`torch.Tensor`): Input to the layer of shape `(batch, seq_len, dim_model)` attention_mask (`torch.Tensor`): Avoid invalid areas to participate in the calculation of shape `(batch, seq_len, seq_len)` position_bias (`torch.Tensor`): Provides position information to attention mechanism of shape `(num_heads, seq_len, seq_len)` output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. past_key_values (`Tuple[torch.Tensor, torch.Tensor])`, *optional*): Cached past key and value projection states use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
""" all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None current_key_values = () if use_cache else None for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer( hidden_states, attention_mask, position_bias, output_attentions=output_attentions, past_key_values=past_key_values[i] if past_key_values else None, use_cache=use_cache, ) hidden_states, attn_weights, current_key_value = layer_outputs if output_attentions: all_self_attns += (attn_weights,) if current_key_value is not None: current_key_values = current_key_values + (current_key_value,) hidden_states = self.output_layernorm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) return hidden_states, current_key_values, all_hidden_states, all_self_attns # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->CPMAnt class CpmAntIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class CpmAntSegmentPositionEmbedding(nn.Module): def __init__(self, config: CpmAntConfig): super().__init__() self.num_heads = config.num_attention_heads self.num_buckets = config.position_bias_num_buckets self.max_distance = config.position_bias_max_distance self.num_segments = config.segment_types self.relative_attention_bias = nn.Parameter( torch.empty( config.segment_types * config.segment_types + config.position_bias_num_buckets, config.num_attention_heads, ) ) def forward( self, key_pos: torch.Tensor, query_pos: torch.Tensor, key_segment: torch.Tensor, query_segment: torch.Tensor, ): with torch.no_grad(): batch = key_pos.size(0) keylen = key_pos.size(1) querylen = query_pos.size(1) if key_pos.size(0) != query_pos.size(0): raise AssertionError( f"key_pos.size(0) should be equal to query_pos.size(0), but got {key_pos.size(0)} and {query_pos.size(0)}!" ) if keylen != key_segment.size(1) or querylen != query_segment.size(1): raise AssertionError( f"keylen should be equal to key_segment.size(1), but got {keylen} and {key_segment.size(1)}!" ) if querylen != query_segment.size(1): raise AssertionError( f"querylen should be equal to query_segment.size(1), but got {querylen} and {query_segment.szie(1)}!" 
                )

            key_pos = key_pos.view(batch, -1, keylen)
            query_pos = query_pos.view(batch, querylen, -1)
            key_segment = key_segment.view(batch, -1, keylen)
            query_segment = query_segment.view(batch, querylen, -1)

            relative_position_bucket = self._segment_relative_position_bucket(query_segment, key_segment)
            relative_position_bucket = relative_position_bucket + self.num_buckets

            # (batch, len_q, len_k)
            absolute_position_bucket = self._position_bucket(
                torch.arange(keylen, dtype=torch.int32, device=relative_position_bucket.device)[None, :]
                - torch.arange(querylen, dtype=torch.int32, device=relative_position_bucket.device)[:, None],
                num_buckets=self.num_buckets,
                max_distance=self.max_distance,
            )
            relative_position_bucket = torch.where(
                (key_segment == query_segment),
                absolute_position_bucket[None, :, :],
                relative_position_bucket,
            )

        # (batch, len_q, len_k, num_heads)
        embeds = F.embedding(relative_position_bucket, self.relative_attention_bias)
        # (batch, num_heads, len_q, len_k)
        embeds = embeds.permute(0, 3, 1, 2).contiguous()
        return embeds

    def _segment_relative_position_bucket(self, query_segment, key_segment):
        return query_segment * self.num_segments + key_segment

    def _position_bucket(self, relative_position, num_buckets=32, max_distance=128):
        relative_buckets = 0
        # always bidirectional in CPMAnt
        num_buckets //= 2
        relative_buckets = (relative_position > 0).to(torch.int32) * num_buckets
        relative_position = torch.abs(relative_position)
        max_exact = num_buckets // 2
        is_small = relative_position < max_exact
        relative_position_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.int32)
        relative_position_if_large = torch.min(
            relative_position_if_large,
            torch.full_like(relative_position_if_large, num_buckets - 1),
        )
        relative_buckets += torch.where(is_small, relative_position.to(torch.int32), relative_position_if_large)
        return relative_buckets


# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->CPMAnt
class CpmAntOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class CpmAntPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
""" config_class = CpmAntConfig base_model_prefix = "cpmant" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.init_std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.init_std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, CpmAntLayerNorm): module.weight.data.fill_(1.0) elif isinstance(module, CpmAntSegmentPositionEmbedding): module.relative_attention_bias.data.normal_(mean=0.0, std=self.config.init_std) CPMANT_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters config ([`~CpmAntConfig`]): Model configuration class with all the parameters of the Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ CPMANT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.Tensor` of shape `(batch_size, seq_len)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`CPMAntTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The bare CPMAnt Model outputting raw hidden-states without any specific head on top.", CPMANT_START_DOCSTRING, ) class CpmAntModel(CpmAntPreTrainedModel): def __init__(self, config: CpmAntConfig): super().__init__(config) self.encoder = CpmAntEncoder(config) self.segment_embedding = nn.Embedding(config.segment_types, config.hidden_size) self.input_embedding = nn.Embedding( config.vocab_size + config.prompt_types * config.prompt_length, config.hidden_size ) self.position_bias = CpmAntSegmentPositionEmbedding(config) self.prompt_length = config.prompt_length self.vocab_size = config.vocab_size self.post_init() def get_input_embeddings(self): return self.input_embedding def set_input_embeddings(self, embeddings, **kwargs): self.input_embedding = embeddings def _prepare_attention_mask(self, input_ids, span, context, length): batch = input_ids.size(0) seqlen = input_ids.size(1) device = input_ids.device directional_mask_2d = torch.arange(seqlen, device=device) <= torch.arange(seqlen, device=device).view(-1, 1) attention_mask = context[:, None, :] | ( context[:, :, None].logical_not() & directional_mask_2d.view(1, seqlen, seqlen) ) attention_mask = attention_mask & (span[:, None, :] == span[:, :, None]) # mask for left padding mask_1d = ( torch.tensor(list(range(seqlen - self.prompt_length))[::-1], device=device)[None, :].repeat(batch, 1) < length[:, None] ) mask_1d = torch.cat((torch.ones(batch, self.prompt_length, device=device).bool(), mask_1d), dim=1) attention_mask = mask_1d.view(batch, seqlen, 1) & mask_1d.view(batch, 1, seqlen) & attention_mask return attention_mask @add_start_docstrings_to_model_forward(CPMANT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, use_cache: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict use_cache = use_cache if use_cache is not None else self.config.use_cache # add prompts ahead if input_ids.dtype != torch.int32: input_ids = input_ids.to(torch.int32) dtype, device = input_ids.dtype, input_ids.device segment = torch.where(input_ids != 0, 2, 0).to(dtype=dtype, device=device) length = (segment != 0).sum(-1).to(dtype=dtype, device=device) input_ids = torch.cat( ( torch.arange( self.prompt_length * 2 + self.vocab_size, self.prompt_length * 3 + self.vocab_size, dtype=dtype, device=device, ).repeat(input_ids.size(0), 1), input_ids, ), dim=1, ) batch, seq_length = input_ids.size() segment = torch.cat((torch.zeros(batch, self.prompt_length, dtype=dtype, device=device), segment), dim=1) context = torch.full((batch, seq_length), 1, dtype=dtype, device=device) position = torch.arange(seq_length, dtype=dtype, device=device).repeat(batch, 1) span = torch.full((batch, seq_length), 0, dtype=dtype, device=device) if past_key_values is None: past_length = 0 past_key_values = tuple([None] * self.encoder.num_layers) input_ids = input_ids.contiguous() 
hidden_states = self.input_embedding(input_ids) segment_states = self.segment_embedding(segment) hidden_states = hidden_states + segment_states else: past_length = past_key_values[0][0].size(-2) segment_states = self.segment_embedding(segment) hidden_states = self.input_embedding(input_ids) + segment_states[:, -1:, :] attention_mask = self._prepare_attention_mask(input_ids, span, context, length) position_bias = self.position_bias(position, position, segment, segment) attention_mask = attention_mask[:, past_length:, :] position_bias = position_bias[:, :, past_length:, :] hidden_states = hidden_states[:, past_length:, :] hidden_states, present_key_values, all_hidden_states, all_attentions = self.encoder( hidden_states, attention_mask, position_bias, output_attentions, output_hidden_states, past_key_values, use_cache, ) if past_length == 0: hidden_states = hidden_states[:, self.prompt_length :, :] # drop the prompt if all_attentions is not None: new_attentions = () for attention in all_attentions: new_attentions += (attention[:, :, self.prompt_length :, self.prompt_length :],) all_attentions = new_attentions if all_hidden_states is not None: new_hidden_states = () for hidden_state in all_hidden_states: new_hidden_states += (hidden_state[:, self.prompt_length :, :],) all_hidden_states = new_hidden_states if not return_dict: return tuple( v for v in [hidden_states, present_key_values, all_hidden_states, all_attentions] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=present_key_values, hidden_states=all_hidden_states, attentions=all_attentions, ) @add_start_docstrings( """ The CPMAnt Model with a language modeling head on top (linear layer with weights tied to the input embeddings). """, CPMANT_START_DOCSTRING, ) class CpmAntForCausalLM(CpmAntPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config: CpmAntConfig): super().__init__(config) self.cpmant = CpmAntModel(config) # lm_head.weight is tied to cpmant.input_embedding.weight self.lm_head = nn.Linear( config.hidden_size, config.vocab_size + config.prompt_types * config.prompt_length, bias=False ) self.post_init() @add_start_docstrings_to_model_forward(CPMANT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, attention_mask: Optional[torch.Tensor] = None, # dummy parameter for text-generation pipeline **kwargs, ) -> Union[Tuple, CausalLMOutputWithPast]: r""" Args: input_ids (`torch.Tensor` of shape `(batch_size, seq_len)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`CPMAntTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. 
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers.
            labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                CPMAnt processes the attention mask automatically; this argument is a dummy parameter for the
                text-generation pipeline.

        Example:

        Text Generation with CpmAntForCausalLM.
        ```python
        >>> from transformers import CPMAntTokenizer, CpmAntForCausalLM

        >>> texts = "今天天气不错,"
        >>> model = CpmAntForCausalLM.from_pretrained("openbmb/cpm-ant-10b")
        >>> tokenizer = CPMAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        >>> input_ids = tokenizer(texts, return_tensors="pt")
        >>> outputs = model.generate(**input_ids)
        >>> output_texts = tokenizer.batch_decode(outputs)
        >>> print(output_texts)
        ['今天天气不错,阳光明媚,我和妈妈一起去超市买东西。\n在超市里,我看到了一个很好玩的玩具,它的名字叫“机器人”。它有一个圆圆的脑袋,两只圆圆的眼睛,还有一个圆圆的']
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        model_output = self.cpmant(
            input_ids, output_attentions, output_hidden_states, past_key_values, use_cache, return_dict
        )
        hidden_states = model_output.last_hidden_state if return_dict else model_output[0]

        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            loss_func = CrossEntropyLoss()
            loss = loss_func(logits.view(-1, logits.size(-1)), labels.view(-1))

        if not return_dict:
            output = (logits,) + model_output[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=model_output.past_key_values,
            hidden_states=model_output.hidden_states,
            attentions=model_output.attentions,
        )

    def get_input_embeddings(self):
        return self.cpmant.input_embedding

    def set_input_embeddings(self, embeddings):
        self.cpmant.input_embedding = embeddings

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def prepare_inputs_for_generation(self, input_ids, **kwargs):
        input_ids = input_ids.int()
        # save the memory usage of dummy attention mask
        if "attention_mask" in kwargs:
            kwargs["attention_mask"] = torch.zeros(1, 1)

        return {
            "input_ids": input_ids,
            "use_cache": kwargs["use_cache"],
            "past_key_values": kwargs.get("past_key_values", None),
        }

    def _reorder_cache(self, past_key_values, beam_idx):
        past_key_values = [list(each) if each is not None else each for each in past_key_values]
        for key_value_layer in past_key_values:
            key_value_layer[0] = key_value_layer[0][beam_idx]
            key_value_layer[1] = key_value_layer[1][beam_idx]
        return past_key_values
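# A brief illustrative sketch (not part of the original file) of how the
# past_key_values cache implemented above supports incremental decoding. The greedy
# loop below is a simplification of what `generate()` does, the checkpoint name
# matches the one referenced in the docstrings, and the step count is arbitrary.
#
# tokenizer = CPMAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
# model = CpmAntForCausalLM.from_pretrained("openbmb/cpm-ant-10b")
# ids = tokenizer("今天天气不错,", return_tensors="pt")["input_ids"]
# past = None
# for _ in range(16):
#     out = model(input_ids=ids, past_key_values=past, use_cache=True)
#     past = out.past_key_values
#     ids = out.logits[:, -1:].argmax(dim=-1).int()  # only the newest token is fed back in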
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/cpmant/tokenization_cpmant.py
# coding=utf-8
# Copyright 2022 The OpenBMB Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for CPMAnt."""
import collections
import os
from typing import List, Optional, Tuple

from transformers.utils import is_jieba_available, requires_backends


if is_jieba_available():
    import jieba

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens


class CpmAntTokenizer(PreTrainedTokenizer):
    """
    Construct a CPMAnt tokenizer, based on jieba pre-segmentation followed by greedy longest-match WordPiece lookup
    against the vocabulary.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        bod_token (`str`, *optional*, defaults to `"<d>"`):
            The beginning of document token.
        eod_token (`str`, *optional*, defaults to `"</d>"`):
            The end of document token.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token.
        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token.
        line_token (`str`, *optional*, defaults to `"</n>"`):
            The line token.
        space_token (`str`, *optional*, defaults to `"</_>"`):
            The space token.
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] add_prefix_space = False def __init__( self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>", padding_side="left", **kwargs, ): requires_backends(self, ["jieba"]) self.bod_token = bod_token self.eod_token = eod_token self.encoder = load_vocab(vocab_file) self.encoder[" "] = self.encoder[space_token] self.encoder["\n"] = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1])) self.decoder = {v: k for k, v in self.encoder.items()} self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=unk_token) super().__init__( bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs, ) @property def bod_token_id(self): return self.encoder[self.bod_token] @property def eod_token_id(self): return self.encoder[self.eod_token] @property def newline_id(self): return self.encoder["\n"] @property def vocab_size(self) -> int: return len(self.encoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def _tokenize(self, text): """Tokenize a string.""" output_tokens = [] for x in jieba.cut(text, cut_all=False): output_tokens.extend(self.wordpiece_tokenizer.tokenize(x)) return output_tokens def _decode(self, token_ids, **kwargs): """Decode ids into a string.""" token_ids = [i for i in token_ids if i >= 0] token_ids = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(token_ids, **kwargs) def check(self, token): return token in self.encoder def convert_tokens_to_string(self, tokens: List[str]) -> str: return "".join(tokens) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index, self.unk_token) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory index = 0 if " " in self.encoder: self.encoder["</_>"] = self.encoder[" "] del self.encoder[" "] if "\n" in self.encoder: self.encoder["</n>"] = self.encoder["\n"] del self.encoder["\n"] self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1])) with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" 
                )
                index = token_index
            writer.write(token + "\n")
            index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating
        and adding special tokens. A CPMAnt sequence has the following format:

        - single sequence: `[BOS] Sequence`.

        Args:
            token_ids_0 (`List[int]`): The first tokenized sequence to which special tokens will be added.
            token_ids_1 (`List[int]`): The optional second tokenized sequence to which special tokens will be added.

        Returns:
            `List[int]`: The model input with special tokens.
        """
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`): List of IDs.
            token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
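# A short illustrative sketch (not part of the original file): round-tripping a
# string through the tokenizer above. The checkpoint name matches the one used in
# this model's docstrings, and jieba must be installed for `_tokenize` to run.
#
# tok = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
# ids = tok("今天天气真好!", return_tensors="pt")["input_ids"]
# print(tok.decode(ids[0]))  # _decode strips <pad>/<s>/</s> before detokenizing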
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/cpmant/__init__.py
# flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. # Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _import_structure = { "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"], "tokenization_cpmant": ["CpmAntTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_cpmant"] = [ "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST", "CpmAntForCausalLM", "CpmAntModel", "CpmAntPreTrainedModel", ] if TYPE_CHECKING: from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig from .tokenization_cpmant import CpmAntTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_cpmant import ( CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST, CpmAntForCausalLM, CpmAntModel, CpmAntPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
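# An illustrative note (not part of the original file): because of the _LazyModule
# registration above, the heavy imports only happen when a symbol is first
# resolved. The first line below stays cheap, while the second triggers the
# torch-backed modeling import.
#
# from transformers.models.cpmant import CpmAntConfig  # configuration only, no torch
# from transformers.models.cpmant import CpmAntModel   # resolved lazily, pulls in torch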
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/gpt_neo/modeling_gpt_neo.py
# coding=utf-8
# Copyright 2021 The Eleuther AI and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch GPT Neo model."""


import os
from typing import Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
from ...modeling_outputs import (
    BaseModelOutputWithPast,
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    CausalLMOutputWithPast,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import is_torch_greater_or_equal_than_1_13
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_flash_attn_2_available,
    is_flash_attn_greater_or_equal_2_10,
    is_torch_fx_available,
    logging,
)
from .configuration_gpt_neo import GPTNeoConfig


if is_flash_attn_2_available():
    from flash_attn import flash_attn_func, flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa


# This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.
# It means that the function will not be traced through and simply appear as a node in the graph.
if is_torch_fx_available():
    if not is_torch_greater_or_equal_than_1_13:
        import torch.fx

    _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask)


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "GPTNeoConfig"


from ..deprecated._archive_maps import GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402


_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-neo-1.3B"


# Copied from transformers.models.llama.modeling_llama._get_unpad_data
def _get_unpad_data(attention_mask):
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return (
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
    )


def load_tf_weights_in_gpt_neo(model, config, gpt_neo_checkpoint_path):
    """Load tf checkpoints in a pytorch model"""
    try:
        import re

        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
) raise tf_path = os.path.abspath(gpt_neo_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: if "global_step" not in name and "adam" not in name: array = tf.train.load_variable(tf_path, name) array = tf.dtypes.cast(array.squeeze(), tf.float32).numpy() name = name.replace("attn/q", "attn/attention/q_proj/w") name = name.replace("attn/k", "attn/attention/k_proj/w") name = name.replace("attn/v", "attn/attention/v_proj/w") name = name.replace("attn/o", "attn/attention/out_proj/w") name = name.replace("norm_1", "ln_1") name = name.replace("norm_2", "ln_2") name = name.replace("attn/compute_output_bias/o_b", "attn/attention/out_proj/b") name = name.replace("conv1d_main/c_fc/kernel", "c_fc/w") name = name.replace("conv1d_main/c_fc/bias", "c_fc/b") name = name.replace("conv1d_main/c_proj/kernel", "c_proj/w") name = name.replace("conv1d_main/c_proj/bias", "c_proj/b") names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name[5:] # skip "gpt2/" name = name.split("/") pointer = model.transformer for m_name in name: if re.fullmatch(r"[A-Za-z]+\d+", m_name): scope_names = re.split(r"(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "w" or scope_names[0] == "g": pointer = getattr(pointer, "weight") elif scope_names[0] == "b": pointer = getattr(pointer, "bias") elif scope_names[0] == "wpe" or scope_names[0] == "wte": pointer = getattr(pointer, scope_names[0]) pointer = getattr(pointer, "weight") else: pointer = getattr(pointer, scope_names[0]) if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if name[-1] == "w" and name[-2] in ["out_proj", "k_proj", "q_proj", "v_proj", "c_proj", "c_fc"]: array = array.transpose() if name == ["wte"]: # if vocab is padded, then trim off the padding embeddings array = array[: config.vocab_size] if pointer.shape != array.shape: raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched {name}") print(f"Initialize PyTorch weight {name}") pointer.data = torch.from_numpy(array) # init the final linear layer using word embeddings embs = model.transformer.wte.weight lin = nn.Linear(embs.size()[1], embs.size()[0], bias=False) lin.weight = embs model.set_output_embeddings(lin) return model class GPTNeoSelfAttention(nn.Module): def __init__(self, config, attention_type): super().__init__() self.config = config max_positions = config.max_position_embeddings bias = torch.tril(torch.ones((max_positions, max_positions), dtype=bool)).view( 1, 1, max_positions, max_positions ) # local causal self attention is a sliding window where each token can only attend to the previous # window_size tokens. This is implemented by updating the causal mask such that for each token # all other tokens are masked except the previous window_size tokens. 
if attention_type == "local": bias = torch.bitwise_xor(bias, torch.tril(bias, -config.window_size)) self.register_buffer("bias", bias, persistent=False) self.register_buffer("masked_bias", torch.tensor(-1e9), persistent=False) self.attn_dropout = nn.Dropout(float(config.attention_dropout)) self.resid_dropout = nn.Dropout(float(config.resid_dropout)) self.is_causal = True self.embed_dim = config.hidden_size self.num_heads = config.num_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True) def _split_heads(self, tensor, num_heads, attn_head_size): """ Splits hidden_size dim into attn_head_size and num_heads """ new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) tensor = tensor.view(new_shape) return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features) def _merge_heads(self, tensor, num_heads, attn_head_size): """ Merges attn_head_size dim and num_attn_heads dim into hidden_size """ tensor = tensor.permute(0, 2, 1, 3).contiguous() new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,) return tensor.view(new_shape) def _attn(self, query, key, value, attention_mask=None, head_mask=None): # Keep the attention weights computation in fp32 to avoid overflow issues query = query.to(torch.float32) key = key.to(torch.float32) attn_weights = torch.matmul(query, key.transpose(-1, -2)) query_length, key_length = query.size(-2), key.size(-2) causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length] mask_value = torch.finfo(attn_weights.dtype).min # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`. 
        # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
        mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
        attn_weights = torch.where(causal_mask, attn_weights, mask_value)

        if attention_mask is not None:
            # Apply the attention mask
            attn_weights = attn_weights + attention_mask

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        attn_weights = attn_weights.to(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_output = torch.matmul(attn_weights, value)

        return attn_output, attn_weights

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        layer_past=None,
        head_mask=None,
        use_cache=False,
        output_attentions=False,
    ):
        query = self.q_proj(hidden_states)
        key = self.k_proj(hidden_states)
        value = self.v_proj(hidden_states)

        query = self._split_heads(query, self.num_heads, self.head_dim)
        key = self._split_heads(key, self.num_heads, self.head_dim)
        value = self._split_heads(value, self.num_heads, self.head_dim)

        if layer_past is not None:
            past_key = layer_past[0]
            past_value = layer_past[1]
            key = torch.cat((past_key, key), dim=-2)
            value = torch.cat((past_value, value), dim=-2)

        if use_cache is True:
            present = (key, value)
        else:
            present = None

        attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)

        attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
        attn_output = self.out_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)

        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights,)

        return outputs  # a, present, (attentions)


class GPTNeoFlashAttention2(GPTNeoSelfAttention):
    """
    GPTNeo flash attention module. This module inherits from `GPTNeoSelfAttention` as the weights of the module stay
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API
    of flash attention and deal with padding tokens in case the input contains any of them.
    """

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        layer_past=None,
        head_mask=None,
        use_cache=False,
        output_attentions=False,
    ):
        bsz, _, _ = hidden_states.size()

        query = self.q_proj(hidden_states)
        key = self.k_proj(hidden_states)
        value = self.v_proj(hidden_states)

        query = self._split_heads(query, self.num_heads, self.head_dim)
        key = self._split_heads(key, self.num_heads, self.head_dim)
        value = self._split_heads(value, self.num_heads, self.head_dim)

        if layer_past is not None:
            past_key = layer_past[0]
            past_value = layer_past[1]
            key = torch.cat((past_key, key), dim=-2)
            value = torch.cat((past_value, value), dim=-2)

        if use_cache is True:
            present = (key, value)
        else:
            present = None

        query_length = query.shape[2]
        tgt_len = key.shape[2]

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x num_heads x head_dim
        query = query.transpose(1, 2).view(bsz, query_length, self.num_heads, self.head_dim)
        key = key.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim)
        value = value.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim)

        attn_dropout = self.config.attention_dropout if self.training else 0.0

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons,
        # therefore the input hidden states get silently cast to float32. Hence, we need to
        # cast them back to the correct dtype just to be sure everything works as expected.
        # This might slow down training & inference so it is recommended to not cast the LayerNorms
        # in fp32. (LlamaRMSNorm handles it correctly)

        if query.dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seem to be silently cast to float32. This might be related to"
                f" the fact that you have upcasted embedding or layer norm layers in float32. We will cast back"
                f" the input in {target_dtype}."
            )

            query = query.to(target_dtype)
            key = key.to(target_dtype)
            value = value.to(target_dtype)

        attn_output = self._flash_attention_forward(
            query, key, value, attention_mask, query_length, dropout=attn_dropout, softmax_scale=1.0
        )

        attn_weights_reshaped = attn_output.reshape(bsz, query_length, self.num_heads * self.head_dim)
        attn_output = self.out_proj(attn_weights_reshaped)
        attn_output = self.resid_dropout(attn_output)

        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights_reshaped,)

        return outputs

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
    def _flash_attention_forward(
        self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
    ):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
        first unpads the input, then computes the attention scores and pads the final attention scores.
Args: query_states (`torch.Tensor`): Input query states to be passed to Flash Attention API key_states (`torch.Tensor`): Input key states to be passed to Flash Attention API value_states (`torch.Tensor`): Input value states to be passed to Flash Attention API attention_mask (`torch.Tensor`): The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the position of padding tokens and 1 for the position of non-padding tokens. dropout (`float`): Attention dropout softmax_scale (`float`, *optional*): The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) """ if not self._flash_attn_uses_top_left_mask: causal = self.is_causal else: # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__. causal = self.is_causal and query_length != 1 # Contains at least one padding token in the sequence if attention_mask is not None: batch_size = query_states.shape[0] query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( query_states, key_states, value_states, attention_mask, query_length ) cu_seqlens_q, cu_seqlens_k = cu_seq_lens max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens attn_output_unpad = flash_attn_varlen_func( query_states, key_states, value_states, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_in_batch_q, max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, causal=causal, ) attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) else: attn_output = flash_attn_func( query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal ) return attn_output # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape key_layer = index_first_axis( key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k ) value_layer = index_first_axis( value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k ) if query_length == kv_seq_len: query_layer = index_first_axis( query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k ) cu_seqlens_q = cu_seqlens_k max_seqlen_in_batch_q = max_seqlen_in_batch_k indices_q = indices_k elif query_length == 1: max_seqlen_in_batch_q = 1 cu_seqlens_q = torch.arange( batch_size + 1, dtype=torch.int32, device=query_layer.device ) # There is a memcpy here, that is very bad. indices_q = cu_seqlens_q[:-1] query_layer = query_layer.squeeze(1) else: # The -q_len: slice assumes left padding. 
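            # Note: batched generation in `transformers` uses left padding, so the last
            # `query_length` columns of the 2D padding mask line up with the query tokens.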
attention_mask = attention_mask[:, -query_length:] query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) return ( query_layer, key_layer, value_layer, indices_q, (cu_seqlens_q, cu_seqlens_k), (max_seqlen_in_batch_q, max_seqlen_in_batch_k), ) GPT_NEO_ATTENTION_CLASSES = { "eager": GPTNeoSelfAttention, "flash_attention_2": GPTNeoFlashAttention2, } class GPTNeoAttention(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.layer_id = layer_id self.attention_layers = config.attention_layers self.attention_type = self.attention_layers[layer_id] if self.attention_type in ["global", "local"]: self.attention = GPT_NEO_ATTENTION_CLASSES[config._attn_implementation](config, self.attention_type) else: raise NotImplementedError( "Only attn layer types 'global' and 'local' exist, but got `config.attention_layers`: " f"{config.attention_layers}. Select attn layer types from ['global', 'local'] only." ) def forward( self, hidden_states, layer_past=None, attention_mask=None, head_mask=None, use_cache=False, output_attentions=False, ): return self.attention( hidden_states, attention_mask=attention_mask, layer_past=layer_past, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, ) class GPTNeoMLP(nn.Module): def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * hidden_size super().__init__() embed_dim = config.hidden_size self.c_fc = nn.Linear(embed_dim, intermediate_size) self.c_proj = nn.Linear(intermediate_size, embed_dim) self.act = ACT2FN[config.activation_function] self.dropout = nn.Dropout(float(config.resid_dropout)) def forward(self, hidden_states): hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class GPTNeoBlock(nn.Module): def __init__(self, config, layer_id): super().__init__() hidden_size = config.hidden_size inner_dim = config.intermediate_size if config.intermediate_size is not None else 4 * hidden_size self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.attn = GPTNeoAttention(config, layer_id) self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.mlp = GPTNeoMLP(inner_dim, config) def forward( self, hidden_states, layer_past=None, attention_mask=None, head_mask=None, use_cache=False, output_attentions=False, ): residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_outputs = self.attn( hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, ) attn_output = attn_outputs[0] # output_attn: a, present, (attentions) outputs = attn_outputs[1:] # residual connection hidden_states = attn_output + residual residual = hidden_states hidden_states = self.ln_2(hidden_states) feed_forward_hidden_states = self.mlp(hidden_states) # residual connection hidden_states = residual + feed_forward_hidden_states if use_cache: outputs = (hidden_states,) + outputs else: outputs = (hidden_states,) + outputs[1:] return outputs # hidden_states, present, (attentions, cross_attentions) class GPTNeoPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = GPTNeoConfig load_tf_weights = load_tf_weights_in_gpt_neo base_model_prefix = "transformer" supports_gradient_checkpointing = True _no_split_modules = ["GPTNeoBlock"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn_2 = True def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, (nn.Linear,)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) GPT_NEO_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`GPTNeoConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ GPT_NEO_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.num_layers`): Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have their past given to this model should not be passed as `input_ids` as they have already been computed. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. 
[What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see `past_key_values`). use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare GPT Neo Model transformer outputting raw hidden-states without any specific head on top.", GPT_NEO_START_DOCSTRING, ) class GPTNeoModel(GPTNeoPreTrainedModel): def __init__(self, config): super().__init__(config) self.embed_dim = config.hidden_size self.wte = nn.Embedding(config.vocab_size, self.embed_dim) self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) self.drop = nn.Dropout(float(config.embed_dropout)) self.h = nn.ModuleList([GPTNeoBlock(config, layer_id=i) for i in range(config.num_layers)]) self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.wte def set_input_embeddings(self, new_embeddings): self.wte = new_embeddings @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[torch.FloatTensor]] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( 
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, input_shape[-1]) if past_key_values is None: past_length = 0 past_key_values = tuple([None] * len(self.h)) else: past_length = past_key_values[0][0].size(-2) if position_ids is None: position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x num_heads x N x N # head_mask has shape n_layer x batch x num_heads x N x N head_mask = self.get_head_mask(head_mask, self.config.num_layers) if inputs_embeds is None: inputs_embeds = self.wte(input_ids) position_embeds = self.wpe(position_ids) hidden_states = inputs_embeds + position_embeds # Attention mask. if self._use_flash_attention_2: # 2d mask is passed through the layers attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None else: # 4d mask is passed through the layers attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_length) if token_type_ids is not None: token_type_embeds = self.wte(token_type_ids) hidden_states = hidden_states + token_type_embeds hidden_states = self.drop(hidden_states) output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func( block.__call__, hidden_states, None, attention_mask, head_mask[i], use_cache, output_attentions, ) else: outputs = block( hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) hidden_states = self.ln_f(hidden_states) hidden_states = hidden_states.view(output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, ) @add_start_docstrings( """ The GPT Neo Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, GPT_NEO_START_DOCSTRING, ) class GPTNeoForCausalLM(GPTNeoPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) self.transformer = GPTNeoModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs): token_type_ids = kwargs.get("token_type_ids", None) # Omit tokens covered by past_key_values if past_key_values: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] if token_type_ids is not None: token_type_ids = token_type_ids[:, -input_ids.shape[1] :] attention_mask = kwargs.get("attention_mask", None) position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1] :] # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "position_ids": position_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } ) return model_inputs @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) 
@add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[torch.FloatTensor]] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(lm_logits.device) # Compute loss in fp32 to match with mesh-tf version # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179 lm_logits = lm_logits.to(torch.float32) # Shift so that tokens < n predict n shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) lm_logits = lm_logits.to(hidden_states.dtype) loss = loss.to(hidden_states.dtype) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @staticmethod def _reorder_cache( past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor ) -> Tuple[Tuple[torch.Tensor]]: """ This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct beam_idx at every generation step. """ return tuple( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past) for layer_past in past_key_values ) @add_start_docstrings( """ The GPTNeo Model transformer with a sequence classification head on top (linear layer). [`GPTNeoForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-1) do. Since it does classification on the last token, it requires to know the position of the last token. 
If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). """, GPT_NEO_START_DOCSTRING, ) class GPTNeoForSequenceClassification(GPTNeoPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPTNeoModel(config) self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[torch.FloatTensor]] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size, sequence_length = input_ids.shape[:2] else: batch_size, sequence_length = inputs_embeds.shape[:2] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 logger.warning( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """ GPT Neo model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, GPT_NEO_START_DOCSTRING, ) class GPTNeoForTokenClassification(GPTNeoPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPTNeoModel(config) self.dropout = nn.Dropout(config.classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint="EleutherAI/gpt-neo-125m", output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_loss=0.25, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + transformer_outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """ The GPT-Neo Model transformer with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). """, GPT_NEO_START_DOCSTRING, ) class GPTNeoForQuestionAnswering(GPTNeoPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPTNeoModel(config) self.qa_outputs = nn.Linear(config.hidden_size, 2) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, real_checkpoint=_CHECKPOINT_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/gpt_neo/configuration_gpt_neo.py
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" GPT Neo model configuration"""

from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class GPTNeoConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`GPTNeoModel`]. It is used to instantiate a GPT
    Neo model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the GPTNeo
    [EleutherAI/gpt-neo-1.3B](https://huggingface.co/EleutherAI/gpt-neo-1.3B) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50257):
            Vocabulary size of the GPT Neo model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`GPTNeoModel`].
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        hidden_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the encoder layers and the pooler layer.
        num_layers (`int`, *optional*, defaults to 24):
            Number of hidden layers in the Transformer encoder.
        attention_types (`List`, *optional*, defaults to `[[['global', 'local'], 12]]`):
            The type of attention for each layer in a `List` of the following format `[[["attention_type"],
            num_layers]]`, e.g. for a 24 layer model `[[["global"], 24]]` or `[[["global", "local"], 12]]`. Choose the
            value of `attention_type` from `["global", "local"]`.
        num_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 8192):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        window_size (`int`, *optional*, defaults to 256):
            The size of the sliding window for local attention.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu_new"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        resid_dropout (`float`, *optional*, defaults to 0.0):
            Residual dropout used in the attention pattern.
        embed_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        classifier_dropout (`float`, *optional*, defaults to 0.1):
            Argument used when doing token classification, used in the model [`GPTNeoForTokenClassification`]. The
            dropout ratio for the hidden layer.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        bos_token_id (`int`, *optional*, defaults to 50256):
            The id of the beginning of sentence token in the vocabulary.
        eos_token_id (`int`, *optional*, defaults to 50256):
            The id of the end of sentence token in the vocabulary.

    Example:

    ```python
    >>> from transformers import GPTNeoConfig, GPTNeoModel

    >>> # Initializing a GPTNeo EleutherAI/gpt-neo-1.3B style configuration
    >>> configuration = GPTNeoConfig()

    >>> # Initializing a model (with random weights) from the EleutherAI/gpt-neo-1.3B style configuration
    >>> model = GPTNeoModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for the attention module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
) super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) @staticmethod def expand_attention_types_params(attention_types): attentions = [] for item in attention_types: for _ in range(item[1]): attentions.extend(item[0]) return attentions def custom_unfold(input, dimension, size, step): """Custom torch.Tensor.unfold implementation to enable the export to ONNX.""" import torch shape = input.size() rank = len(shape) sizedim = shape[dimension] low_indices = torch.arange(0, sizedim, step) min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1 indices = torch.arange(size) + low_indices[:min_length][:, None] s = [slice(None)] * rank s[dimension] = indices sliced = input[s] perm = list(range(0, rank + 1)) perm.append(perm.pop(dimension + 1)) return sliced.permute(perm) def custom_get_block_length_and_num_blocks(seq_length, window_size): """ Custom implementation for GPTNeoAttentionMixin._get_block_length_and_num_blocks to enable the export to ONNX as original implementation uses Python variables and control flow. """ import torch candidates = torch.arange(1, window_size) remainders = torch.remainder(seq_length, candidates) divisor_indices = remainders == 0 divisors = candidates[divisor_indices] largest_divisor = torch.max(divisors) return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor") class GPTNeoOnnxConfig(OnnxConfigWithPast): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}}) if self.use_past: self.fill_with_past_key_values_(common_inputs, direction="inputs") common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"} else: common_inputs["attention_mask"] = {0: "batch", 1: "sequence"} return common_inputs @property def num_attention_heads(self) -> int: return self._config.num_heads def generate_dummy_inputs( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) # We need to order the input in the way they appears in the forward() ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]}) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") else: import torch batch, seqlen = common_inputs["input_ids"].shape # Not using the same length for past_key_values past_key_values_length = seqlen + 2 past_shape = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) ordered_inputs["past_key_values"] = [ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers) ] ordered_inputs["attention_mask"] = common_inputs["attention_mask"] if self.use_past: mask_dtype = ordered_inputs["attention_mask"].dtype ordered_inputs["attention_mask"] = torch.cat( [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 ) return ordered_inputs @property def default_onnx_opset(self) -> int: return 13
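

# Usage sketch: shows how `attention_types` expands into the per-layer
# `attention_layers` list that the modeling code indexes by `layer_id`. The
# configs built here are illustrative, not official checkpoints.
if __name__ == "__main__":
    default_config = GPTNeoConfig()
    # The default [[["global", "local"], 12]] expands to 24 alternating layers.
    assert default_config.attention_layers[:4] == ["global", "local", "global", "local"]
    assert len(default_config.attention_layers) == default_config.num_layers

    # An all-global 24-layer variant:
    all_global_config = GPTNeoConfig(attention_types=[[["global"], 24]])
    assert all_global_config.attention_layers == ["global"] * 24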
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/gpt_neo/modeling_flax_gpt_neo.py
# coding=utf-8 # Copyright 2021 The Eleuther AI and The Google Flax Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import partial from typing import Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_gpt_neo import GPTNeoConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "GPTNeoConfig" _CHECKPOINT_FOR_DOC = "EleutherAI/gpt-neo-1.3B" GPT_NEO_START_DOCSTRING = r""" This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a Flax Linen [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`GPTNeoConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. """ GPT_NEO_INPUTS_DOCSTRING = r""" Args: input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length`. 
Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`): Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class FlaxGPTNeoSelfAttention(nn.Module): config: GPTNeoConfig attention_type: str dtype: jnp.dtype = jnp.float32 def setup(self): config = self.config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and " f"`num_heads`: {self.num_heads})." ) self.attn_dropout = nn.Dropout(config.attention_dropout) self.resid_dropout = nn.Dropout(config.resid_dropout) dense = partial( nn.Dense, self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.q_proj, self.k_proj, self.v_proj = dense(use_bias=False), dense(use_bias=False), dense(use_bias=False) self.out_proj = dense() self.causal_mask = make_causal_mask(jnp.ones((1, config.max_position_embeddings), dtype="bool"), dtype="bool") if self.attention_type == "local": self.causal_mask = self.causal_mask ^ jnp.tril(self.causal_mask, -config.window_size) def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) @nn.compact def _concatenate_to_cache(self, key, value, query, attention_mask): """ This function takes projected key, value states from a single input token and concatenates the states to cached states from previous steps. This function is slighly adapted from the official Flax repository: https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 """ # detect if we're initializing by absence of existing cache data. 
is_initialized = self.has_variable("cache", "cached_key") cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) if is_initialized: *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape # update key, value caches with our new 1d spatial slices cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements. pad_mask = jnp.broadcast_to( jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), ) attention_mask = combine_masks(pad_mask, attention_mask) return key, value, attention_mask def __call__( self, hidden_states, attention_mask=None, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, ): query = self.q_proj(hidden_states) * jnp.sqrt(self.head_dim).astype(self.dtype) key = self.k_proj(hidden_states) value = self.v_proj(hidden_states) query = self._split_heads(query) key = self._split_heads(key) value = self._split_heads(value) query_length, key_length = query.shape[1], key.shape[1] if self.has_variable("cache", "cached_key"): mask_shift = self.variables["cache"]["cache_index"] max_decoder_length = self.variables["cache"]["cached_key"].shape[1] causal_mask = lax.dynamic_slice( self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) ) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] batch_size = hidden_states.shape[0] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) dropout_rng = None if not deterministic and self.config.attention_dropout > 0.0: dropout_rng = self.make_rng("dropout") # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. 
if self.has_variable("cache", "cached_key") or init_cache: key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask) # transform boolean mask into float mask attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), ) # usual dot product attention attn_weights = dot_product_attention_weights( query, key, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attention_dropout, deterministic=deterministic, dtype=self.dtype, precision=None, ) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value) attn_output = self._merge_heads(attn_output) attn_output = self.out_proj(attn_output) attn_output = self.resid_dropout(attn_output, deterministic=deterministic) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs class FlaxGPTNeoAttention(nn.Module): config: GPTNeoConfig layer_id: int = 0 dtype: jnp.dtype = jnp.float32 def setup(self): attention_type = self.config.attention_layers[self.layer_id] self.attention = FlaxGPTNeoSelfAttention(self.config, attention_type, dtype=self.dtype) def __call__( self, hidden_states, attention_mask=None, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, ): return self.attention( hidden_states, attention_mask=attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, ) class FlaxGPTNeoMLP(nn.Module): config: GPTNeoConfig intermediate_size: int dtype: jnp.dtype = jnp.float32 def setup(self): embed_dim = self.config.hidden_size kernel_init = jax.nn.initializers.normal(self.config.initializer_range) self.c_fc = nn.Dense(self.intermediate_size, dtype=self.dtype, kernel_init=kernel_init) self.c_proj = nn.Dense(embed_dim, dtype=self.dtype, kernel_init=kernel_init) self.act = ACT2FN[self.config.activation_function] self.dropout = nn.Dropout(rate=self.config.resid_dropout) def __call__(self, hidden_states, deterministic: bool = True): hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states class FlaxGPTNeoBlock(nn.Module): config: GPTNeoConfig layer_id: int = 0 dtype: jnp.dtype = jnp.float32 def setup(self): hidden_size = self.config.hidden_size inner_dim = self.config.intermediate_size if self.config.intermediate_size is not None else 4 * hidden_size self.ln_1 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) self.attn = FlaxGPTNeoAttention(self.config, layer_id=self.layer_id, dtype=self.dtype) self.ln_2 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) self.mlp = FlaxGPTNeoMLP(self.config, inner_dim, dtype=self.dtype) def __call__( self, hidden_states, attention_mask=None, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, ): residual = hidden_states hidden_states = self.ln_1(hidden_states) outputs = self.attn( hidden_states, attention_mask=attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, ) # residual connection attn_output = outputs[0] hidden_states = attn_output + residual residual = hidden_states hidden_states = self.ln_2(hidden_states) feed_forward_hidden_states = self.mlp(hidden_states, deterministic=deterministic) # residual connection hidden_states = residual + 
feed_forward_hidden_states return (hidden_states,) + outputs[1:] class FlaxGPTNeoPreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = GPTNeoConfig base_model_prefix = "transformer" module_class: nn.Module = None def __init__( self, config: GPTNeoConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") attention_mask = jnp.ones_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def init_cache(self, batch_size, max_length): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. """ # init input variables to retrieve cache input_ids = jnp.ones((batch_size, max_length)) attention_mask = jnp.ones_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init( jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True ) return unfreeze(init_variables["cache"]) @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) def __call__( self, input_ids, attention_mask=None, position_ids=None, params: dict = None, past_key_values: dict = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict batch_size, sequence_length = input_ids.shape if position_ids is None: if past_key_values is not None: raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.") position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) if attention_mask is None: attention_mask = jnp.ones((batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed down to ensure cache is used. 
        # It has to be made sure that cache is marked as mutable so that it can be changed by
        # FlaxGPTNeoAttention module
        if past_key_values:
            inputs["cache"] = past_key_values
            mutable = ["cache"]
        else:
            mutable = False

        outputs = self.module.apply(
            inputs,
            jnp.array(input_ids, dtype="i4"),
            jnp.array(attention_mask, dtype="i4"),
            jnp.array(position_ids, dtype="i4"),
            not train,
            False,
            output_attentions,
            output_hidden_states,
            return_dict,
            rngs=rngs,
            mutable=mutable,
        )

        # add updated cache to model output
        if past_key_values is not None and return_dict:
            outputs, past_key_values = outputs
            outputs["past_key_values"] = unfreeze(past_key_values["cache"])
            return outputs
        elif past_key_values is not None and not return_dict:
            outputs, past_key_values = outputs
            outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]

        return outputs


class FlaxGPTNeoBlockCollection(nn.Module):
    config: GPTNeoConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.blocks = [
            FlaxGPTNeoBlock(self.config, layer_id=i, name=str(i), dtype=self.dtype)
            for i in range(self.config.num_hidden_layers)
        ]

    def __call__(
        self,
        hidden_states,
        attention_mask=None,
        deterministic: bool = True,
        init_cache: bool = False,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        all_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None

        for block in self.blocks:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = block(
                hidden_states,
                attention_mask,
                deterministic=deterministic,
                init_cache=init_cache,
                output_attentions=output_attentions,
            )
            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions += (layer_outputs[1],)

        # this contains possible `None` values - `FlaxGPTNeoModule` will filter them out
        outputs = (hidden_states, all_hidden_states, all_attentions)

        return outputs


class FlaxGPTNeoModule(nn.Module):
    config: GPTNeoConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.embed_dim = self.config.hidden_size
        embedding_init = jax.nn.initializers.normal(stddev=self.config.initializer_range)
        self.wte = nn.Embed(
            self.config.vocab_size,
            self.embed_dim,
            embedding_init=embedding_init,
        )
        self.wpe = nn.Embed(
            self.config.max_position_embeddings,
            self.embed_dim,
            embedding_init=embedding_init,
        )
        self.dropout = nn.Dropout(rate=self.config.embed_dropout)
        self.h = FlaxGPTNeoBlockCollection(self.config, dtype=self.dtype)
        self.ln_f = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        position_ids,
        deterministic=True,
        init_cache: bool = False,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        input_embeds = self.wte(input_ids.astype("i4"))
        position_embeds = self.wpe(position_ids.astype("i4"))

        hidden_states = input_embeds + position_embeds
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)

        outputs = self.h(
            hidden_states,
            attention_mask,
            deterministic=deterministic,
            init_cache=init_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        hidden_states = self.ln_f(hidden_states)

        if output_hidden_states:
            all_hidden_states = outputs[1] + (hidden_states,)
            outputs = (hidden_states, all_hidden_states) + outputs[2:]
        else:
            outputs = (hidden_states,) + outputs[1:]

        if not return_dict:
            return tuple(v for v in outputs if v is not None)

        return
FlaxBaseModelOutput( last_hidden_state=hidden_states, hidden_states=outputs[1], attentions=outputs[-1], ) @add_start_docstrings( "The bare GPTNeo Model transformer outputting raw hidden-states without any specific head on top.", GPT_NEO_START_DOCSTRING, ) class FlaxGPTNeoModel(FlaxGPTNeoPreTrainedModel): module_class = FlaxGPTNeoModule append_call_sample_docstring(FlaxGPTNeoModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutput, _CONFIG_FOR_DOC) class FlaxGPTNeoForCausalLMModule(nn.Module): config: GPTNeoConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.transformer = FlaxGPTNeoModule(self.config, dtype=self.dtype) self.lm_head = nn.Dense( self.config.vocab_size, use_bias=False, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) def __call__( self, input_ids, attention_mask, position_ids, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): outputs = self.transformer( input_ids, attention_mask, position_ids, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_kernel = self.transformer.variables["params"]["wte"]["embedding"].T lm_logits = self.lm_head.apply({"params": {"kernel": shared_kernel}}, hidden_states) else: lm_logits = self.lm_head(hidden_states) if not return_dict: return (lm_logits,) + outputs[1:] return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings( """ The GPTNeo Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, GPT_NEO_START_DOCSTRING, ) class FlaxGPTNeoForCausalLM(FlaxGPTNeoPreTrainedModel): module_class = FlaxGPTNeoForCausalLMModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None): # initializing the cache batch_size, seq_length = input_ids.shape past_key_values = self.init_cache(batch_size, max_length) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since GPTNeo uses a causal mask, those positions are masked anyways. # Thus we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if attention_mask is not None: position_ids = attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) return { "past_key_values": past_key_values, "attention_mask": extended_attention_mask, "position_ids": position_ids, } def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1 return model_kwargs append_call_sample_docstring(FlaxGPTNeoForCausalLM, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutput, _CONFIG_FOR_DOC)
0
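A minimal usage sketch for the Flax GPT-Neo causal LM defined above: `generate` drives the `init_cache` / `prepare_inputs_for_generation` machinery shown in the file. The 125M checkpoint name follows EleutherAI's Hub releases; substitute whatever GPT-Neo weights you actually use.

```python
from transformers import AutoTokenizer, FlaxGPTNeoForCausalLM

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125M")
model = FlaxGPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")

inputs = tokenizer("Flax caches past keys and values", return_tensors="np")

# `generate` calls `prepare_inputs_for_generation`, which sets up the "cache"
# collection via `init_cache` and a static attention mask of length `max_length`.
outputs = model.generate(**inputs, max_length=24, do_sample=False)
print(tokenizer.decode(outputs.sequences[0], skip_special_tokens=True))
```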
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/gpt_neo/__init__.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available _import_structure = { "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_gpt_neo"] = [ "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoForCausalLM", "GPTNeoForQuestionAnswering", "GPTNeoForSequenceClassification", "GPTNeoForTokenClassification", "GPTNeoModel", "GPTNeoPreTrainedModel", "load_tf_weights_in_gpt_neo", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_flax_gpt_neo"] = [ "FlaxGPTNeoForCausalLM", "FlaxGPTNeoModel", "FlaxGPTNeoPreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0
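Because this `__init__.py` replaces itself with a `_LazyModule`, the heavy `modeling_*` files are only imported when one of the registered names is first accessed. A small illustrative sketch of that behavior (not part of the repo):

```python
import importlib

# `sys.modules` now holds the `_LazyModule` registered above, so this import is cheap.
gpt_neo = importlib.import_module("transformers.models.gpt_neo")

# The attribute lookup below is what actually loads `modeling_gpt_neo`,
# via `_LazyModule.__getattr__` and the `_import_structure` mapping.
model_cls = gpt_neo.GPTNeoForCausalLM
print(model_cls.__module__)  # transformers.models.gpt_neo.modeling_gpt_neo
```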
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/gpt_neo/convert_gpt_neo_mesh_tf_to_pytorch.py
# coding=utf-8 # Copyright 2021 The Eleuther AI and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert GPT Neo checkpoint.""" import argparse import json from transformers import GPTNeoConfig, GPTNeoForCausalLM, load_tf_weights_in_gpt_neo from transformers.utils import logging logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path): # Initialise PyTorch model config_json = json.load(open(config_file, "r")) config = GPTNeoConfig( hidden_size=config_json["n_embd"], num_layers=config_json["n_layer"], num_heads=config_json["n_head"], attention_types=config_json["attention_types"], max_position_embeddings=config_json["n_positions"], resid_dropout=config_json["res_dropout"], embed_dropout=config_json["embed_dropout"], attention_dropout=config_json["attn_dropout"], ) print(f"Building PyTorch model from configuration: {config}") model = GPTNeoForCausalLM(config) # Load weights from tf checkpoint load_tf_weights_in_gpt_neo(model, config, tf_checkpoint_path) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}") model.save_pretrained(pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained mesh-tf model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
0
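The converter above is normally driven from the command line, but the function can also be called directly. The paths below are placeholders for illustration only:

```python
from transformers.models.gpt_neo.convert_gpt_neo_mesh_tf_to_pytorch import (
    convert_tf_checkpoint_to_pytorch,
)

# Placeholder paths; point these at a real Mesh-TF checkpoint and its config.json.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/mesh_tf_checkpoint",
    config_file="/path/to/config.json",
    pytorch_dump_path="/path/to/pytorch_dump",
)
```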
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/focalnet/modeling_focalnet.py
# coding=utf-8 # Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch FocalNet model.""" import collections.abc import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_focalnet import FocalNetConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "FocalNetConfig" # Base docstring _CHECKPOINT_FOR_DOC = "microsoft/focalnet-tiny" _EXPECTED_OUTPUT_SHAPE = [1, 49, 768] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "microsoft/focalnet-tiny" _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" from ..deprecated._archive_maps import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 @dataclass class FocalNetEncoderOutput(ModelOutput): """ FocalNet encoder's outputs, with potential hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass class FocalNetModelOutput(ModelOutput): """ FocalNet model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed): Average pooling of the last layer hidden-state. 
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    last_hidden_state: torch.FloatTensor = None
    pooler_output: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class FocalNetMaskedImageModelingOutput(ModelOutput):
    """
    FocalNet masked image model outputs.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided):
            Masked image modeling (MIM) loss.
        reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Reconstructed pixel values.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    loss: Optional[torch.FloatTensor] = None
    reconstruction: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class FocalNetImageClassifierOutput(ModelOutput):
    """
    FocalNet outputs for image classification.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None class FocalNetEmbeddings(nn.Module): """ Construct the patch embeddings and layernorm. Optionally, also the mask token. """ def __init__(self, config, use_mask_token=False): super().__init__() self.patch_embeddings = FocalNetPatchEmbeddings( config=config, image_size=config.image_size, patch_size=config.patch_size, num_channels=config.num_channels, embed_dim=config.embed_dim, use_conv_embed=config.use_conv_embed, is_stem=True, ) self.patch_grid = self.patch_embeddings.grid_size self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None self.norm = nn.LayerNorm(config.embed_dim, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward( self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor] = None ) -> Tuple[torch.Tensor]: embeddings, output_dimensions = self.patch_embeddings(pixel_values) embeddings = self.norm(embeddings) batch_size, seq_len, _ = embeddings.size() if bool_masked_pos is not None: mask_tokens = self.mask_token.expand(batch_size, seq_len, -1) # replace the masked visual tokens by mask_tokens mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) embeddings = embeddings * (1.0 - mask) + mask_tokens * mask embeddings = self.dropout(embeddings) return embeddings, output_dimensions class FocalNetPatchEmbeddings(nn.Module): def __init__( self, config, image_size, patch_size, num_channels, embed_dim, add_norm=False, use_conv_embed=False, is_stem=False, ): super().__init__() image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1]) if use_conv_embed: # if we choose to use conv embedding, then we treat the stem and non-stem differently if is_stem: kernel_size = 7 padding = 2 stride = 4 else: kernel_size = 3 padding = 1 stride = 2 self.projection = nn.Conv2d( num_channels, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding ) else: self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size) if add_norm: self.norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) else: self.norm = None def maybe_pad(self, pixel_values, height, width): if width % self.patch_size[1] != 0: pad_values = (0, self.patch_size[1] - width % self.patch_size[1]) pixel_values = nn.functional.pad(pixel_values, pad_values) if height % self.patch_size[0] != 0: pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0]) pixel_values = nn.functional.pad(pixel_values, 
pad_values) return pixel_values def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]: _, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # pad the input to be divisible by self.patch_size, if needed pixel_values = self.maybe_pad(pixel_values, height, width) embeddings = self.projection(pixel_values) _, _, height, width = embeddings.shape output_dimensions = (height, width) embeddings = embeddings.flatten(2).transpose(1, 2) if self.norm is not None: embeddings = self.norm(embeddings) return embeddings, output_dimensions # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->FocalNet class FocalNetDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return "p={}".format(self.drop_prob) class FocalNetModulation(nn.Module): def __init__(self, config, index, dim, focal_factor=2, bias=True, projection_dropout=0.0): super().__init__() self.dim = dim self.focal_window = config.focal_windows[index] self.focal_level = config.focal_levels[index] self.focal_factor = focal_factor self.use_post_layernorm_in_modulation = config.use_post_layernorm_in_modulation self.normalize_modulator = config.normalize_modulator self.projection_in = nn.Linear(dim, 2 * dim + (self.focal_level + 1), bias=bias) self.projection_context = nn.Conv2d(dim, dim, kernel_size=1, stride=1, bias=bias) self.activation = nn.GELU() self.projection_out = nn.Linear(dim, dim) self.projection_dropout = nn.Dropout(projection_dropout) self.focal_layers = nn.ModuleList() self.kernel_sizes = [] for k in range(self.focal_level): kernel_size = self.focal_factor * k + self.focal_window self.focal_layers.append( nn.Sequential( nn.Conv2d( dim, dim, kernel_size=kernel_size, stride=1, groups=dim, padding=kernel_size // 2, bias=False ), nn.GELU(), ) ) self.kernel_sizes.append(kernel_size) if self.use_post_layernorm_in_modulation: self.layernorm = nn.LayerNorm(dim, eps=config.layer_norm_eps) def forward(self, hidden_state): """ Args: 
            hidden_state:
                Input features with shape of (batch_size, height, width, num_channels)
        """
        num_channels = hidden_state.shape[-1]

        # pre linear projection
        x = self.projection_in(hidden_state).permute(0, 3, 1, 2).contiguous()
        q, ctx, self.gates = torch.split(x, (num_channels, num_channels, self.focal_level + 1), 1)

        # context aggregation
        ctx_all = 0
        for level in range(self.focal_level):
            ctx = self.focal_layers[level](ctx)
            ctx_all = ctx_all + ctx * self.gates[:, level : level + 1]
        ctx_global = self.activation(ctx.mean(2, keepdim=True).mean(3, keepdim=True))
        ctx_all = ctx_all + ctx_global * self.gates[:, self.focal_level :]

        # normalize context
        if self.normalize_modulator:
            ctx_all = ctx_all / (self.focal_level + 1)

        # focal modulation
        self.modulator = self.projection_context(ctx_all)
        x_out = q * self.modulator
        x_out = x_out.permute(0, 2, 3, 1).contiguous()
        if self.use_post_layernorm_in_modulation:
            x_out = self.layernorm(x_out)

        # post linear projection
        x_out = self.projection_out(x_out)
        x_out = self.projection_dropout(x_out)
        return x_out


class FocalNetMlp(nn.Module):
    def __init__(self, config, in_features, hidden_features=None, out_features=None, drop=0.0):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.activation = ACT2FN[config.hidden_act]
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, hidden_state):
        hidden_state = self.fc1(hidden_state)
        hidden_state = self.activation(hidden_state)
        hidden_state = self.drop(hidden_state)
        hidden_state = self.fc2(hidden_state)
        hidden_state = self.drop(hidden_state)
        return hidden_state


class FocalNetLayer(nn.Module):
    r"""Focal Modulation Network layer (block).

    Args:
        config (`FocalNetConfig`):
            Model config.
        index (`int`):
            Layer index.
        dim (`int`):
            Number of input channels.
        input_resolution (`Tuple[int]`):
            Input resolution.
        drop_path (`float`, *optional*, defaults to 0.0):
            Stochastic depth rate.
""" def __init__(self, config, index, dim, input_resolution, drop_path=0.0): super().__init__() self.config = config # layer-specific attributes self.dim = dim self.input_resolution = input_resolution # general attributes self.drop = config.hidden_dropout_prob self.use_post_layernorm = config.use_post_layernorm self.norm1 = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.modulation = FocalNetModulation( config=config, index=index, dim=dim, projection_dropout=self.drop, ) self.drop_path = FocalNetDropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = nn.LayerNorm(dim, eps=config.layer_norm_eps) mlp_hidden_dim = int(dim * config.mlp_ratio) self.mlp = FocalNetMlp(config=config, in_features=dim, hidden_features=mlp_hidden_dim, drop=self.drop) self.gamma_1 = 1.0 self.gamma_2 = 1.0 if config.use_layerscale: self.gamma_1 = nn.Parameter(config.layerscale_value * torch.ones((dim)), requires_grad=True) self.gamma_2 = nn.Parameter(config.layerscale_value * torch.ones((dim)), requires_grad=True) def forward(self, hidden_state, input_dimensions): height, width = input_dimensions batch_size, _, num_channels = hidden_state.shape shortcut = hidden_state # Focal Modulation hidden_state = hidden_state if self.use_post_layernorm else self.norm1(hidden_state) hidden_state = hidden_state.view(batch_size, height, width, num_channels) hidden_state = self.modulation(hidden_state).view(batch_size, height * width, num_channels) hidden_state = hidden_state if not self.use_post_layernorm else self.norm1(hidden_state) # FFN hidden_state = shortcut + self.drop_path(self.gamma_1 * hidden_state) hidden_state = hidden_state + self.drop_path( self.gamma_2 * (self.norm2(self.mlp(hidden_state)) if self.use_post_layernorm else self.mlp(self.norm2(hidden_state))) ) return hidden_state class FocalNetStage(nn.Module): def __init__(self, config, index, input_resolution): super().__init__() self.config = config self.num_stages = len(config.depths) embed_dim = [config.embed_dim * (2**i) for i in range(self.num_stages)] dim = embed_dim[index] out_dim = embed_dim[index + 1] if (index < self.num_stages - 1) else None downsample = FocalNetPatchEmbeddings if (index < self.num_stages - 1) else None # stochastic depth decay rule dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] drop_path = dpr[sum(config.depths[:index]) : sum(config.depths[: index + 1])] self.layers = nn.ModuleList( [ FocalNetLayer( config=config, index=index, dim=dim, input_resolution=input_resolution, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, ) for i in range(config.depths[index]) ] ) if downsample is not None: self.downsample = downsample( config=config, image_size=input_resolution, patch_size=2, num_channels=dim, embed_dim=out_dim, add_norm=True, use_conv_embed=config.use_conv_embed, is_stem=False, ) else: self.downsample = None self.pointing = False def forward(self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int]) -> Tuple[torch.Tensor]: height, width = input_dimensions for layer_module in self.layers: hidden_states = layer_module(hidden_states, input_dimensions) hidden_states_before_downsampling = hidden_states if self.downsample is not None: height, width = input_dimensions hidden_states = hidden_states.transpose(1, 2).reshape( hidden_states_before_downsampling.shape[0], -1, height, width ) hidden_states, output_dimensions = self.downsample(hidden_states) else: output_dimensions = (height, width, height, width) stage_outputs = (hidden_states, 
hidden_states_before_downsampling, output_dimensions) return stage_outputs class FocalNetEncoder(nn.Module): def __init__(self, config, grid_size): super().__init__() self.num_stages = len(config.depths) self.config = config self.stages = nn.ModuleList( [ FocalNetStage( config=config, index=i_layer, input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)), ) for i_layer in range(self.num_stages) ] ) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int], output_hidden_states: Optional[bool] = False, output_hidden_states_before_downsampling: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple, FocalNetEncoderOutput]: all_hidden_states = () if output_hidden_states else None all_reshaped_hidden_states = () if output_hidden_states else None if output_hidden_states: batch_size, _, hidden_size = hidden_states.shape # rearrange b (h w) c -> b c h w reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) for i, stage_module in enumerate(self.stages): if self.gradient_checkpointing and self.training: stage_outputs = self._gradient_checkpointing_func( stage_module.__call__, hidden_states, input_dimensions, ) else: stage_outputs = stage_module(hidden_states, input_dimensions) hidden_states = stage_outputs[0] hidden_states_before_downsampling = stage_outputs[1] output_dimensions = stage_outputs[2] input_dimensions = (output_dimensions[-2], output_dimensions[-1]) if output_hidden_states and output_hidden_states_before_downsampling: batch_size, _, hidden_size = hidden_states_before_downsampling.shape # rearrange b (h w) c -> b c h w # here we use the original (not downsampled) height and width reshaped_hidden_state = hidden_states_before_downsampling.view( batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size ) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states_before_downsampling,) all_reshaped_hidden_states += (reshaped_hidden_state,) elif output_hidden_states and not output_hidden_states_before_downsampling: batch_size, _, hidden_size = hidden_states.shape # rearrange b (h w) c -> b c h w reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) return FocalNetEncoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, reshaped_hidden_states=all_reshaped_hidden_states, ) # Copied from transformers.models.swin.modeling_swin.SwinPreTrainedModel with Swin->FocalNet,swin->focalnet class FocalNetPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = FocalNetConfig base_model_prefix = "focalnet" main_input_name = "pixel_values" supports_gradient_checkpointing = True _no_split_modules = ["FocalNetStage"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) FOCALNET_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`FocalNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ FOCALNET_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`AutoImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare FocalNet Model outputting raw hidden-states without any specific head on top.", FOCALNET_START_DOCSTRING, ) class FocalNetModel(FocalNetPreTrainedModel): def __init__(self, config, add_pooling_layer=True, use_mask_token=False): super().__init__(config) self.config = config self.num_stages = len(config.depths) self.num_features = int(config.embed_dim * 2 ** (self.num_stages - 1)) self.embeddings = FocalNetEmbeddings(config, use_mask_token=use_mask_token) self.encoder = FocalNetEncoder(config, self.embeddings.patch_grid) self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps) self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=FocalNetModelOutput, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, bool_masked_pos: Optional[torch.BoolTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, FocalNetModelOutput]: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). 
""" output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos) encoder_outputs = self.encoder( embedding_output, input_dimensions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = None if self.pooler is not None: pooled_output = self.pooler(sequence_output.transpose(1, 2)) pooled_output = torch.flatten(pooled_output, 1) if not return_dict: output = (sequence_output, pooled_output) + encoder_outputs[1:] return output return FocalNetModelOutput( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states, ) @add_start_docstrings( """FocalNet Model with a decoder on top for masked image modeling. This follows the same implementation as in [SimMIM](https://arxiv.org/abs/2111.09886). <Tip> Note that we provide a script to pre-train this model on custom data in our [examples directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining). </Tip> """, FOCALNET_START_DOCSTRING, ) class FocalNetForMaskedImageModeling(FocalNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.focalnet = FocalNetModel(config, add_pooling_layer=False, use_mask_token=True) self.num_stages = len(config.depths) num_features = int(config.embed_dim * 2 ** (self.num_stages - 1)) self.decoder = nn.Sequential( nn.Conv2d( in_channels=num_features, out_channels=config.encoder_stride**2 * config.num_channels, kernel_size=1 ), nn.PixelShuffle(config.encoder_stride), ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FocalNetMaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, bool_masked_pos: Optional[torch.BoolTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, FocalNetMaskedImageModelingOutput]: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). 
        Returns:

        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, FocalNetConfig, FocalNetForMaskedImageModeling
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-base-simmim-window6-192")
        >>> config = FocalNetConfig()
        >>> model = FocalNetForMaskedImageModeling(config)

        >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
        >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
        >>> # create random boolean mask of shape (batch_size, num_patches)
        >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()

        >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
        >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction
        >>> list(reconstructed_pixel_values.shape)
        [1, 3, 192, 192]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.focalnet(
            pixel_values,
            bool_masked_pos=bool_masked_pos,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        # Reshape to (batch_size, num_channels, height, width)
        sequence_output = sequence_output.transpose(1, 2)
        batch_size, num_channels, sequence_length = sequence_output.shape
        height = width = math.floor(sequence_length**0.5)
        sequence_output = sequence_output.reshape(batch_size, num_channels, height, width)

        # Reconstruct pixel values
        reconstructed_pixel_values = self.decoder(sequence_output)

        masked_im_loss = None
        if bool_masked_pos is not None:
            size = self.config.image_size // self.config.patch_size
            bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
            mask = (
                bool_masked_pos.repeat_interleave(self.config.patch_size, 1)
                .repeat_interleave(self.config.patch_size, 2)
                .unsqueeze(1)
                .contiguous()
            )
            reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none")
            masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels

        if not return_dict:
            output = (reconstructed_pixel_values,) + outputs[2:]
            return ((masked_im_loss,) + output) if masked_im_loss is not None else output

        return FocalNetMaskedImageModelingOutput(
            loss=masked_im_loss,
            reconstruction=reconstructed_pixel_values,
            hidden_states=outputs.hidden_states,
            reshaped_hidden_states=outputs.reshaped_hidden_states,
        )


@add_start_docstrings(
    """
    FocalNet Model with an image classification head on top (a linear layer on top of the pooled output) e.g. for
    ImageNet.
""", FOCALNET_START_DOCSTRING, ) class FocalNetForImageClassification(FocalNetPreTrainedModel): # Copied from transformers.models.swin.modeling_swin.SwinForImageClassification.__init__ with Swin->FocalNet, swin->focalnet def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.focalnet = FocalNetModel(config) # Classifier head self.classifier = ( nn.Linear(self.focalnet.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=FocalNetImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, FocalNetImageClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.focalnet( pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return FocalNetImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, reshaped_hidden_states=outputs.reshaped_hidden_states, ) @add_start_docstrings( """ FocalNet backbone, to be used with frameworks like X-Decoder. 
""", FOCALNET_START_DOCSTRING, ) class FocalNetBackbone(FocalNetPreTrainedModel, BackboneMixin): def __init__(self, config: FocalNetConfig): super().__init__(config) super()._init_backbone(config) self.num_features = [config.embed_dim] + config.hidden_sizes self.focalnet = FocalNetModel(config) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> BackboneOutput: """ Returns: Examples: ```python >>> from transformers import AutoImageProcessor, AutoBackbone >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny-lrf") >>> model = AutoBackbone.from_pretrained("microsoft/focalnet-tiny-lrf") >>> inputs = processor(image, return_tensors="pt") >>> outputs = model(**inputs) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) outputs = self.focalnet(pixel_values, output_hidden_states=True, return_dict=True) hidden_states = outputs.reshaped_hidden_states feature_maps = () for idx, stage in enumerate(self.stage_names): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: output = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, )
0
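As a quick sanity check of the classification head defined above, a minimal inference sketch against the `microsoft/focalnet-tiny` checkpoint referenced in this file's docstrings:

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, FocalNetForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# ImageNet-1k label for the highest-scoring class
print(model.config.id2label[logits.argmax(-1).item()])
```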
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/focalnet/__init__.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_focalnet"] = [ "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FocalNetForImageClassification", "FocalNetForMaskedImageModeling", "FocalNetBackbone", "FocalNetModel", "FocalNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0
mavonic_private_repos/transformers/src/transformers/models
mavonic_private_repos/transformers/src/transformers/models/focalnet/configuration_focalnet.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ FocalNet model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) from ..deprecated._archive_maps import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 class FocalNetConfig(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`FocalNetModel`]. It is used to instantiate a FocalNet model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the FocalNet [microsoft/focalnet-tiny](https://huggingface.co/microsoft/focalnet-tiny) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 4): The size (resolution) of each patch in the embeddings layer. num_channels (`int`, *optional*, defaults to 3): The number of input channels. embed_dim (`int`, *optional*, defaults to 96): Dimensionality of patch embedding. use_conv_embed (`bool`, *optional*, defaults to `False`): Whether to use convolutional embedding. The authors noted that using convolutional embedding usually improves performance, but it is not used by default. hidden_sizes (`List[int]`, *optional*, defaults to `[192, 384, 768, 768]`): Dimensionality (hidden size) at each stage. depths (`List[int]`, *optional*, defaults to `[2, 2, 6, 2]`): Depth (number of layers) of each stage in the encoder. focal_levels (`List[int]`, *optional*, defaults to `[2, 2, 2, 2]`): Number of focal levels in each layer of the respective stages in the encoder. focal_windows (`List[int]`, *optional*, defaults to `[3, 3, 3, 3]`): Focal window size in each layer of the respective stages in the encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. mlp_ratio (`float`, *optional*, defaults to 4.0): Ratio of MLP hidden dimensionality to embedding dimensionality. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings and encoder. drop_path_rate (`float`, *optional*, defaults to 0.1): Stochastic depth rate. use_layerscale (`bool`, *optional*, defaults to `False`): Whether to use layer scale in the encoder. layerscale_value (`float`, *optional*, defaults to 0.0001): The initial value of the layer scale. 
use_post_layernorm (`bool`, *optional*, defaults to `False`): Whether to use post layer normalization in the encoder. use_post_layernorm_in_modulation (`bool`, *optional*, defaults to `False`): Whether to use post layer normalization in the modulation layer. normalize_modulator (`bool`, *optional*, defaults to `False`): Whether to normalize the modulator. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. encoder_stride (`int`, *optional*, defaults to 32): Factor to increase the spatial resolution by in the decoder head for masked image modeling. out_features (`List[str]`, *optional*): If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. (depending on how many stages the model has). If unset and `out_indices` is set, will default to the corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. out_indices (`List[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. If unset and `out_features` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. Example: ```python >>> from transformers import FocalNetConfig, FocalNetModel >>> # Initializing a FocalNet microsoft/focalnet-tiny style configuration >>> configuration = FocalNetConfig() >>> # Initializing a model (with random weights) from the microsoft/focalnet-tiny style configuration >>> model = FocalNetModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "focalnet" def __init__( self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False, hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0, drop_path_rate=0.1, use_layerscale=False, layerscale_value=1e-4, use_post_layernorm=False, use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs, ): super().__init__(**kwargs) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.use_conv_embed = use_conv_embed self.hidden_sizes = hidden_sizes self.depths = depths self.focal_levels = focal_levels self.focal_windows = focal_windows self.hidden_act = hidden_act self.mlp_ratio = mlp_ratio self.hidden_dropout_prob = hidden_dropout_prob self.drop_path_rate = drop_path_rate self.use_layerscale = use_layerscale self.layerscale_value = layerscale_value self.use_post_layernorm = use_post_layernorm self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation self.normalize_modulator = normalize_modulator self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.encoder_stride = encoder_stride self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)] self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, 
out_indices=out_indices, stage_names=self.stage_names)
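To make the `stage_names` / `out_features` alignment at the end of the constructor concrete, here is a short sketch; the printed values are inferred from the code above rather than quoted from documentation, so treat them as an assumption:

```python
from transformers import FocalNetConfig

# Default depths give four stages, so stage_names is ["stem", "stage1", ..., "stage4"].
config = FocalNetConfig(out_features=["stage3", "stage4"])
print(config.stage_names)
# Indices align with positions in stage_names, so this should show 3 and 4
# (the exact container type, list vs. tuple, can vary across versions).
print(config.out_indices)
```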
0