diff --git a/janus/lib/python3.10/site-packages/transformers/models/bart/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/bart/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e480a9e2d14012cf3a6a992206006fb8df518683
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/bart/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/bert/__init__.py b/janus/lib/python3.10/site-packages/transformers/models/bert/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ef22794dde26e6275ba0ae850f6042ff6a451fd
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/bert/__init__.py
@@ -0,0 +1,32 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
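+# Note on the pattern below: under TYPE_CHECKING the submodules are imported eagerly so static type
+# checkers can see the public symbols; at runtime the module is replaced by a _LazyModule that defers
+# the actual imports until a symbol is first accessed.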
+if TYPE_CHECKING:
+ from .configuration_bert import *
+ from .modeling_bert import *
+ from .modeling_flax_bert import *
+ from .modeling_tf_bert import *
+ from .tokenization_bert import *
+ from .tokenization_bert_fast import *
+ from .tokenization_bert_tf import *
+else:
+ import sys
+
+ _file = globals()["__file__"]
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/janus/lib/python3.10/site-packages/transformers/models/bert/__pycache__/tokenization_bert.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/bert/__pycache__/tokenization_bert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2fc5dd31865dae09ce5e386ddfde48cf89f12ae0
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/bert/__pycache__/tokenization_bert.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/bert/modeling_bert.py b/janus/lib/python3.10/site-packages/transformers/models/bert/modeling_bert.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c53963cee79220231825eb6c7a8d6dec74e7a6c
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/bert/modeling_bert.py
@@ -0,0 +1,2009 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch BERT model."""
+
+import math
+import os
+import warnings
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from packaging import version
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...generation import GenerationMixin
+from ...modeling_attn_mask_utils import (
+ _prepare_4d_attention_mask_for_sdpa,
+ _prepare_4d_causal_attention_mask_for_sdpa,
+)
+from ...modeling_outputs import (
+ BaseModelOutputWithPastAndCrossAttentions,
+ BaseModelOutputWithPoolingAndCrossAttentions,
+ CausalLMOutputWithCrossAttentions,
+ MaskedLMOutput,
+ MultipleChoiceModelOutput,
+ NextSentencePredictorOutput,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ get_torch_version,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_bert import BertConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "google-bert/bert-base-uncased"
+_CONFIG_FOR_DOC = "BertConfig"
+
+# TokenClassification docstring
+_CHECKPOINT_FOR_TOKEN_CLASSIFICATION = "dbmdz/bert-large-cased-finetuned-conll03-english"
+_TOKEN_CLASS_EXPECTED_OUTPUT = (
+ "['O', 'I-ORG', 'I-ORG', 'I-ORG', 'O', 'O', 'O', 'O', 'O', 'I-LOC', 'O', 'I-LOC', 'I-LOC'] "
+)
+_TOKEN_CLASS_EXPECTED_LOSS = 0.01
+
+# QuestionAnswering docstring
+_CHECKPOINT_FOR_QA = "deepset/bert-base-cased-squad2"
+_QA_EXPECTED_OUTPUT = "'a nice puppet'"
+_QA_EXPECTED_LOSS = 7.41
+_QA_TARGET_START_INDEX = 14
+_QA_TARGET_END_INDEX = 15
+
+# SequenceClassification docstring
+_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "textattack/bert-base-uncased-yelp-polarity"
+_SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_1'"
+_SEQ_CLASS_EXPECTED_LOSS = 0.01
+
+
+def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
+ """Load tf checkpoints in a pytorch model."""
+ try:
+ import re
+
+ import numpy as np
+ import tensorflow as tf
+ except ImportError:
+ logger.error(
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
+ "https://www.tensorflow.org/install/ for installation instructions."
+ )
+ raise
+ tf_path = os.path.abspath(tf_checkpoint_path)
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
+ # Load weights from TF model
+ init_vars = tf.train.list_variables(tf_path)
+ names = []
+ arrays = []
+ for name, shape in init_vars:
+ logger.info(f"Loading TF weight {name} with shape {shape}")
+ array = tf.train.load_variable(tf_path, name)
+ names.append(name)
+ arrays.append(array)
+
+ for name, array in zip(names, arrays):
+ name = name.split("/")
+        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
+        # which are not required for using the pretrained model
+ if any(
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
+ for n in name
+ ):
+ logger.info(f"Skipping {'/'.join(name)}")
+ continue
+ pointer = model
+ for m_name in name:
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
+ scope_names = re.split(r"_(\d+)", m_name)
+ else:
+ scope_names = [m_name]
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
+ pointer = getattr(pointer, "bias")
+ elif scope_names[0] == "output_weights":
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] == "squad":
+ pointer = getattr(pointer, "classifier")
+ else:
+ try:
+ pointer = getattr(pointer, scope_names[0])
+ except AttributeError:
+ logger.info(f"Skipping {'/'.join(name)}")
+ continue
+ if len(scope_names) >= 2:
+ num = int(scope_names[1])
+ pointer = pointer[num]
+ if m_name[-11:] == "_embeddings":
+ pointer = getattr(pointer, "weight")
+ elif m_name == "kernel":
+ array = np.transpose(array)
+ try:
+ if pointer.shape != array.shape:
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
+ except ValueError as e:
+ e.args += (pointer.shape, array.shape)
+ raise
+ logger.info(f"Initialize PyTorch weight {name}")
+ pointer.data = torch.from_numpy(array)
+ return model
+
+
+class BertEmbeddings(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+ self.register_buffer(
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
+ )
+
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ past_key_values_length: int = 0,
+ ) -> torch.Tensor:
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
+ if position_ids is None:
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
+
+        # Default token_type_ids to the all-zeros buffer registered in the constructor, which is what
+        # they usually are when auto-generated. The registered buffer helps users trace the model without
+        # passing token_type_ids and solves issue #5664.
+ if token_type_ids is None:
+ if hasattr(self, "token_type_ids"):
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
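+        # BERT input representation: sum of word, token-type (segment) and, for absolute positions,
+        # position embeddings, followed by LayerNorm and dropout.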
+ embeddings = inputs_embeds + token_type_embeddings
+ if self.position_embedding_type == "absolute":
+ position_embeddings = self.position_embeddings(position_ids)
+ embeddings += position_embeddings
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+class BertSelfAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.position_embedding_type = position_embedding_type or getattr(
+ config, "position_embedding_type", "absolute"
+ )
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ self.max_position_embeddings = config.max_position_embeddings
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
+
+ self.is_decoder = config.is_decoder
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
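+        # Reshape [batch, seq_len, all_head_size] -> [batch, num_heads, seq_len, head_size] so that
+        # attention scores can be computed independently per head.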
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ mixed_query_layer = self.query(hidden_states)
+
+ # If this is instantiated as a cross-attention module, the keys
+ # and values come from an encoder; the attention mask needs to be
+ # such that the encoder's padding tokens are not attended to.
+ is_cross_attention = encoder_hidden_states is not None
+
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_layer = past_key_value[0]
+ value_layer = past_key_value[1]
+ attention_mask = encoder_attention_mask
+ elif is_cross_attention:
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
+ attention_mask = encoder_attention_mask
+ elif past_key_value is not None:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
+ else:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ use_cache = past_key_value is not None
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_layer, value_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
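+            # Relative position embeddings: score each query/key pair by the learned embedding of their
+            # distance (shifted by max_position_embeddings - 1 so the embedding index is non-negative).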
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
+ if use_cache:
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
+ -1, 1
+ )
+ else:
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
+ distance = position_ids_l - position_ids_r
+
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
+
+ if self.position_embedding_type == "relative_key":
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores
+ elif self.position_embedding_type == "relative_key_query":
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+ if attention_mask is not None:
+            # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ if self.is_decoder:
+ outputs = outputs + (past_key_value,)
+ return outputs
+
+
+class BertSdpaSelfAttention(BertSelfAttention):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__(config, position_embedding_type=position_embedding_type)
+ self.dropout_prob = config.attention_probs_dropout_prob
+ self.require_contiguous_qkv = version.parse(get_torch_version()) < version.parse("2.2.0")
+
+ # Adapted from BertSelfAttention
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ if self.position_embedding_type != "absolute" or output_attentions or head_mask is not None:
+ # TODO: Improve this warning with e.g. `model.config._attn_implementation = "manual"` once implemented.
+ logger.warning_once(
+ "BertSdpaSelfAttention is used but `torch.nn.functional.scaled_dot_product_attention` does not support "
+ "non-absolute `position_embedding_type` or `output_attentions=True` or `head_mask`. Falling back to "
+ "the manual attention implementation, but specifying the manual implementation will be required from "
+ "Transformers version v5.0.0 onwards. This warning can be removed using the argument "
+ '`attn_implementation="eager"` when loading the model.'
+ )
+ return super().forward(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ query_layer = self.transpose_for_scores(self.query(hidden_states))
+
+ # If this is instantiated as a cross-attention module, the keys and values come from an encoder; the attention
+ # mask needs to be such that the encoder's padding tokens are not attended to.
+ is_cross_attention = encoder_hidden_states is not None
+
+ current_states = encoder_hidden_states if is_cross_attention else hidden_states
+ attention_mask = encoder_attention_mask if is_cross_attention else attention_mask
+
+ # Check `seq_length` of `past_key_value` == `len(current_states)` to support prefix tuning
+ if is_cross_attention and past_key_value and past_key_value[0].shape[2] == current_states.shape[1]:
+ key_layer, value_layer = past_key_value
+ else:
+ key_layer = self.transpose_for_scores(self.key(current_states))
+ value_layer = self.transpose_for_scores(self.value(current_states))
+ if past_key_value is not None and not is_cross_attention:
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_layer, value_layer)
+
+ # SDPA with memory-efficient backend is broken in torch==2.1.2 when using non-contiguous inputs and a custom
+ # attn_mask, so we need to call `.contiguous()` here. This was fixed in torch==2.2.0.
+ # Reference: https://github.com/pytorch/pytorch/issues/112577
+ if self.require_contiguous_qkv and query_layer.device.type == "cuda" and attention_mask is not None:
+ query_layer = query_layer.contiguous()
+ key_layer = key_layer.contiguous()
+ value_layer = value_layer.contiguous()
+
+ # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
+ # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
+ # The tgt_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create
+ # a causal mask in case tgt_len == 1.
+ is_causal = (
+ True if self.is_decoder and not is_cross_attention and attention_mask is None and tgt_len > 1 else False
+ )
+
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
+ query_layer,
+ key_layer,
+ value_layer,
+ attn_mask=attention_mask,
+ dropout_p=self.dropout_prob if self.training else 0.0,
+ is_causal=is_causal,
+ )
+
+ attn_output = attn_output.transpose(1, 2)
+ attn_output = attn_output.reshape(bsz, tgt_len, self.all_head_size)
+
+ outputs = (attn_output,)
+ if self.is_decoder:
+ outputs = outputs + (past_key_value,)
+ return outputs
+
+
+class BertSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
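+# Maps `config._attn_implementation` to the concrete self-attention implementation used by BertAttention.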
+BERT_SELF_ATTENTION_CLASSES = {
+ "eager": BertSelfAttention,
+ "sdpa": BertSdpaSelfAttention,
+}
+
+
+class BertAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ self.self = BERT_SELF_ATTENTION_CLASSES[config._attn_implementation](
+ config, position_embedding_type=position_embedding_type
+ )
+ self.output = BertSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.self(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+class BertIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+class BertOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class BertLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = BertAttention(config)
+ self.is_decoder = config.is_decoder
+ self.add_cross_attention = config.add_cross_attention
+ if self.add_cross_attention:
+ if not self.is_decoder:
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
+ self.crossattention = BertAttention(config, position_embedding_type="absolute")
+ self.intermediate = BertIntermediate(config)
+ self.output = BertOutput(config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ self_attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ past_key_value=self_attn_past_key_value,
+ )
+ attention_output = self_attention_outputs[0]
+
+ # if decoder, the last output is tuple of self-attn cache
+ if self.is_decoder:
+ outputs = self_attention_outputs[1:-1]
+ present_key_value = self_attention_outputs[-1]
+ else:
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ cross_attn_present_key_value = None
+ if self.is_decoder and encoder_hidden_states is not None:
+ if not hasattr(self, "crossattention"):
+ raise ValueError(
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
+ " by setting `config.add_cross_attention=True`"
+ )
+
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ cross_attention_outputs = self.crossattention(
+ attention_output,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ cross_attn_past_key_value,
+ output_attentions,
+ )
+ attention_output = cross_attention_outputs[0]
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
+
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
+ cross_attn_present_key_value = cross_attention_outputs[-1]
+ present_key_value = present_key_value + cross_attn_present_key_value
+
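+        # Run the feed-forward block, optionally splitting the sequence dimension into chunks of
+        # `chunk_size_feed_forward` to trade compute for peak memory.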
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
+ )
+ outputs = (layer_output,) + outputs
+
+ # if decoder, return the attn key/values as the last output
+ if self.is_decoder:
+ outputs = outputs + (present_key_value,)
+
+ return outputs
+
+ def feed_forward_chunk(self, attention_output):
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
+
+
+class BertEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ next_decoder_cache = () if use_cache else None
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+ past_key_value = past_key_values[i] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+ if use_cache:
+ next_decoder_cache += (layer_outputs[-1],)
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+ if self.config.add_cross_attention:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ next_decoder_cache,
+ all_hidden_states,
+ all_self_attentions,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_decoder_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+class BertPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+class BertPredictionHeadTransform(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ if isinstance(config.hidden_act, str):
+ self.transform_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.transform_act_fn = config.hidden_act
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.transform_act_fn(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states)
+ return hidden_states
+
+
+class BertLMPredictionHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.transform = BertPredictionHeadTransform(config)
+
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
+ self.decoder.bias = self.bias
+
+ def _tie_weights(self):
+ self.decoder.bias = self.bias
+
+ def forward(self, hidden_states):
+ hidden_states = self.transform(hidden_states)
+ hidden_states = self.decoder(hidden_states)
+ return hidden_states
+
+
+class BertOnlyMLMHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.predictions = BertLMPredictionHead(config)
+
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
+ prediction_scores = self.predictions(sequence_output)
+ return prediction_scores
+
+
+class BertOnlyNSPHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
+
+ def forward(self, pooled_output):
+ seq_relationship_score = self.seq_relationship(pooled_output)
+ return seq_relationship_score
+
+
+class BertPreTrainingHeads(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.predictions = BertLMPredictionHead(config)
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
+
+ def forward(self, sequence_output, pooled_output):
+ prediction_scores = self.predictions(sequence_output)
+ seq_relationship_score = self.seq_relationship(pooled_output)
+ return prediction_scores, seq_relationship_score
+
+
+class BertPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = BertConfig
+ load_tf_weights = load_tf_weights_in_bert
+ base_model_prefix = "bert"
+ supports_gradient_checkpointing = True
+ _supports_sdpa = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+@dataclass
+class BertForPreTrainingOutput(ModelOutput):
+ """
+ Output type of [`BertForPreTraining`].
+
+ Args:
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
+ (classification) loss.
+ prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
+ before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ prediction_logits: torch.FloatTensor = None
+ seq_relationship_logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+BERT_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`BertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+BERT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+        attention_mask (`torch.FloatTensor` of shape `({0})` or `(batch_size, sequence_length, target_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
+ BERT_START_DOCSTRING,
+)
+class BertModel(BertPreTrainedModel):
+ """
+
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
+
+    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
+    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
+ """
+
+ _no_split_modules = ["BertEmbeddings", "BertLayer"]
+
+ def __init__(self, config, add_pooling_layer=True):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = BertEmbeddings(config)
+ self.encoder = BertEncoder(config)
+
+ self.pooler = BertPooler(config) if add_pooling_layer else None
+
+ self.attn_implementation = config._attn_implementation
+ self.position_embedding_type = config.position_embedding_type
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, target_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if self.config.is_decoder:
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ else:
+ use_cache = False
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ batch_size, seq_length = input_shape
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if token_type_ids is None:
+ if hasattr(self.embeddings, "token_type_ids"):
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ past_key_values_length=past_key_values_length,
+ )
+
+ if attention_mask is None:
+ attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
+
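+        # SDPA-style masks are only built when the SDPA kernels will actually be used; with a non-absolute
+        # position embedding type, a head mask, or output_attentions, the self-attention falls back to the
+        # eager implementation and expects the classic extended attention mask instead.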
+ use_sdpa_attention_masks = (
+ self.attn_implementation == "sdpa"
+ and self.position_embedding_type == "absolute"
+ and head_mask is None
+ and not output_attentions
+ )
+
+ # Expand the attention mask
+ if use_sdpa_attention_masks and attention_mask.dim() == 2:
+ # Expand the attention mask for SDPA.
+ # [bsz, seq_len] -> [bsz, 1, seq_len, seq_len]
+ if self.config.is_decoder:
+ extended_attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
+ attention_mask,
+ input_shape,
+ embedding_output,
+ past_key_values_length,
+ )
+ else:
+ extended_attention_mask = _prepare_4d_attention_mask_for_sdpa(
+ attention_mask, embedding_output.dtype, tgt_len=seq_length
+ )
+ else:
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
+
+ # If a 2D or 3D attention mask is provided for the cross-attention
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if self.config.is_decoder and encoder_hidden_states is not None:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
+
+ if use_sdpa_attention_masks and encoder_attention_mask.dim() == 2:
+ # Expand the attention mask for SDPA.
+ # [bsz, seq_len] -> [bsz, 1, seq_len, seq_len]
+ encoder_extended_attention_mask = _prepare_4d_attention_mask_for_sdpa(
+ encoder_attention_mask, embedding_output.dtype, tgt_len=seq_length
+ )
+ else:
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = None
+
+ # Prepare head mask if needed
+        # 1.0 in head_mask indicates we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPoolingAndCrossAttentions(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ past_key_values=encoder_outputs.past_key_values,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ cross_attentions=encoder_outputs.cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
+ sentence prediction (classification)` head.
+ """,
+ BERT_START_DOCSTRING,
+)
+class BertForPreTraining(BertPreTrainedModel):
+ _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.bert = BertModel(config)
+ self.cls = BertPreTrainingHeads(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.cls.predictions.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.cls.predictions.decoder = new_embeddings
+ self.cls.predictions.bias = new_embeddings.bias
+
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ next_sentence_label: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], BertForPreTrainingOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
+            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
+ next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence
+            pair (see `input_ids` docstring). Indices should be in `[0, 1]`:
+
+ - 0 indicates sequence B is a continuation of sequence A,
+ - 1 indicates sequence B is a random sequence.
+ kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
+ Used to hide legacy arguments that have been deprecated.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, BertForPreTraining
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
+ >>> model = BertForPreTraining.from_pretrained("google-bert/bert-base-uncased")
+
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> prediction_logits = outputs.prediction_logits
+ >>> seq_relationship_logits = outputs.seq_relationship_logits
+ ```
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.bert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output, pooled_output = outputs[:2]
+ prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
+
+ total_loss = None
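+        # The pre-training objective is the sum of the masked language modeling loss and the
+        # next sentence prediction (classification) loss.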
+ if labels is not None and next_sentence_label is not None:
+ loss_fct = CrossEntropyLoss()
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+ next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
+ total_loss = masked_lm_loss + next_sentence_loss
+
+ if not return_dict:
+ output = (prediction_scores, seq_relationship_score) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return BertForPreTrainingOutput(
+ loss=total_loss,
+ prediction_logits=prediction_scores,
+ seq_relationship_logits=seq_relationship_score,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """Bert Model with a `language modeling` head on top for CLM fine-tuning.""", BERT_START_DOCSTRING
+)
+class BertLMHeadModel(BertPreTrainedModel, GenerationMixin):
+ _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ if not config.is_decoder:
+ logger.warning("If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`")
+
+ self.bert = BertModel(config, add_pooling_layer=False)
+ self.cls = BertOnlyMLMHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.cls.predictions.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.cls.predictions.decoder = new_embeddings
+ self.cls.predictions.bias = new_embeddings.bias
+
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=CausalLMOutputWithCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.Tensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **loss_kwargs,
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
+ ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ if labels is not None:
+ use_cache = False
+
+ outputs = self.bert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ prediction_scores = self.cls(sequence_output)
+
+ lm_loss = None
+ if labels is not None:
+ lm_loss = self.loss_function(prediction_scores, labels, self.config.vocab_size, **loss_kwargs)
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((lm_loss,) + output) if lm_loss is not None else output
+
+ return CausalLMOutputWithCrossAttentions(
+ loss=lm_loss,
+ logits=prediction_scores,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+ def _reorder_cache(self, past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
+
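+# Illustrative usage sketch (not part of the upstream file): `BertLMHeadModel` needs `is_decoder=True`
+# to run as a standalone causal LM, as the warning in `__init__` notes. A minimal example, assuming the
+# public `google-bert/bert-base-uncased` checkpoint:
+#
+#     from transformers import AutoTokenizer, BertLMHeadModel
+#     import torch
+#
+#     tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
+#     model = BertLMHeadModel.from_pretrained("google-bert/bert-base-uncased", is_decoder=True)
+#
+#     inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+#     # passing labels computes the shifted next-token cross-entropy via self.loss_function
+#     outputs = model(**inputs, labels=inputs["input_ids"])
+#     loss, logits = outputs.loss, outputs.logits  # logits: (batch, seq_len, vocab_size)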
+
+@add_start_docstrings("""Bert Model with a `language modeling` head on top.""", BERT_START_DOCSTRING)
+class BertForMaskedLM(BertPreTrainedModel):
+ _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ if config.is_decoder:
+ logger.warning(
+ "If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for "
+ "bi-directional self-attention."
+ )
+
+ self.bert = BertModel(config, add_pooling_layer=False)
+ self.cls = BertOnlyMLMHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.cls.predictions.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.cls.predictions.decoder = new_embeddings
+ self.cls.predictions.bias = new_embeddings.bias
+
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output="'paris'",
+ expected_loss=0.88,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.bert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ prediction_scores = self.cls(sequence_output)
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return MaskedLMOutput(
+ loss=masked_lm_loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
+ input_shape = input_ids.shape
+ effective_batch_size = input_shape[0]
+
+ # add a dummy token
+ if self.config.pad_token_id is None:
+ raise ValueError("The PAD token should be defined for generation")
+
+ attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
+ dummy_token = torch.full(
+ (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
+ )
+ input_ids = torch.cat([input_ids, dummy_token], dim=1)
+
+ return {"input_ids": input_ids, "attention_mask": attention_mask}
+
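+# Illustrative usage sketch (not part of the upstream file): filling a `[MASK]` token, matching the
+# `expected_output="'paris'"` code-sample annotation above.
+#
+#     from transformers import AutoTokenizer, BertForMaskedLM
+#     import torch
+#
+#     tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
+#     model = BertForMaskedLM.from_pretrained("google-bert/bert-base-uncased")
+#
+#     inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
+#     with torch.no_grad():
+#         logits = model(**inputs).logits  # (batch, seq_len, vocab_size)
+#
+#     # locate the [MASK] position and take the highest-scoring vocabulary token
+#     mask_pos = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
+#     print(tokenizer.decode(logits[0, mask_pos].argmax(dim=-1)))  # typically "paris"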
+
+@add_start_docstrings(
+ """Bert Model with a `next sentence prediction (classification)` head on top.""",
+ BERT_START_DOCSTRING,
+)
+class BertForNextSentencePrediction(BertPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.bert = BertModel(config)
+ self.cls = BertOnlyNSPHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Tuple[torch.Tensor], NextSentencePredictorOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
+ (see `input_ids` docstring). Indices should be in `[0, 1]`:
+
+ - 0 indicates sequence B is a continuation of sequence A,
+ - 1 indicates sequence B is a random sequence.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, BertForNextSentencePrediction
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
+ >>> model = BertForNextSentencePrediction.from_pretrained("google-bert/bert-base-uncased")
+
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
+
+ >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
+ >>> logits = outputs.logits
+ >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
+ ```
+ """
+
+ if "next_sentence_label" in kwargs:
+ warnings.warn(
+ "The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
+ " `labels` instead.",
+ FutureWarning,
+ )
+ labels = kwargs.pop("next_sentence_label")
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.bert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ seq_relationship_scores = self.cls(pooled_output)
+
+ next_sentence_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
+
+ if not return_dict:
+ output = (seq_relationship_scores,) + outputs[2:]
+ return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
+
+ return NextSentencePredictorOutput(
+ loss=next_sentence_loss,
+ logits=seq_relationship_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
+ output) e.g. for GLUE tasks.
+ """,
+ BERT_START_DOCSTRING,
+)
+class BertForSequenceClassification(BertPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.config = config
+
+ self.bert = BertModel(config)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION,
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
+ expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.bert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
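+# Illustrative usage sketch (not part of the upstream file): the `problem_type` branch above selects MSE,
+# cross-entropy, or BCE-with-logits depending on `num_labels` and the label dtype. A minimal single-label
+# example (the classification head is randomly initialized when loading the base checkpoint, so the logits
+# are untrained):
+#
+#     import torch
+#     from transformers import AutoTokenizer, BertForSequenceClassification
+#
+#     tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
+#     model = BertForSequenceClassification.from_pretrained("google-bert/bert-base-uncased", num_labels=2)
+#
+#     inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+#     outputs = model(**inputs, labels=torch.tensor([1]))  # integer labels -> cross-entropy
+#     loss, logits = outputs.loss, outputs.logits  # logits: (batch_size, num_labels)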
+
+@add_start_docstrings(
+ """
+ Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
+ softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ BERT_START_DOCSTRING,
+)
+class BertForMultipleChoice(BertPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.bert = BertModel(config)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, 1)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+ `input_ids` above)
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
+ inputs_embeds = (
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+ if inputs_embeds is not None
+ else None
+ )
+
+ outputs = self.bert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+ reshaped_logits = logits.view(-1, num_choices)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(reshaped_logits, labels)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return MultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
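+# Illustrative usage sketch (not part of the upstream file): inputs are expected as
+# `(batch_size, num_choices, sequence_length)`; the forward pass above flattens them to
+# `(batch_size * num_choices, sequence_length)` and reshapes the single-logit head back per choice.
+#
+#     import torch
+#     from transformers import AutoTokenizer, BertForMultipleChoice
+#
+#     tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
+#     model = BertForMultipleChoice.from_pretrained("google-bert/bert-base-uncased")
+#
+#     prompt = "The baguette is a French bread."
+#     choices = ["It is eaten with a fork and a knife.", "It is eaten while held in the hand."]
+#     enc = tokenizer([prompt] * len(choices), choices, return_tensors="pt", padding=True)
+#     inputs = {k: v.unsqueeze(0) for k, v in enc.items()}  # add the num_choices dimension
+#     outputs = model(**inputs, labels=torch.tensor([0]))
+#     loss, logits = outputs.loss, outputs.logits  # logits: (batch_size, num_choices)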
+
+@add_start_docstrings(
+ """
+ Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ BERT_START_DOCSTRING,
+)
+class BertForTokenClassification(BertPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.bert = BertModel(config, add_pooling_layer=False)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT,
+ expected_loss=_TOKEN_CLASS_EXPECTED_LOSS,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.bert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
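+# Illustrative usage sketch (not part of the upstream file): per-token logits for NER-style tagging; labels
+# of shape `(batch_size, sequence_length)` give a token-level cross-entropy loss (head untrained here).
+#
+#     import torch
+#     from transformers import AutoTokenizer, BertForTokenClassification
+#
+#     tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
+#     model = BertForTokenClassification.from_pretrained("google-bert/bert-base-uncased", num_labels=5)
+#
+#     inputs = tokenizer("HuggingFace is based in NYC", return_tensors="pt")
+#     with torch.no_grad():
+#         logits = model(**inputs).logits  # (batch, seq_len, num_labels)
+#     predicted_ids = logits.argmax(dim=-1)  # one class id per token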
+
+@add_start_docstrings(
+ """
+ Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ BERT_START_DOCSTRING,
+)
+class BertForQuestionAnswering(BertPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.bert = BertModel(config, add_pooling_layer=False)
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_QA,
+ output_type=QuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ qa_target_start_index=_QA_TARGET_START_INDEX,
+ qa_target_end_index=_QA_TARGET_END_INDEX,
+ expected_output=_QA_EXPECTED_OUTPUT,
+ expected_loss=_QA_EXPECTED_LOSS,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.Tensor] = None,
+ end_positions: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.bert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, split add a dimension
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
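+# Illustrative usage sketch (not part of the upstream file): extractive QA with a SQuAD-fine-tuned
+# checkpoint (the checkpoint name here is an assumption; any BERT QA checkpoint works the same way).
+#
+#     import torch
+#     from transformers import AutoTokenizer, BertForQuestionAnswering
+#
+#     tokenizer = AutoTokenizer.from_pretrained("deepset/bert-base-cased-squad2")
+#     model = BertForQuestionAnswering.from_pretrained("deepset/bert-base-cased-squad2")
+#
+#     inputs = tokenizer("Who was Jim Henson?", "Jim Henson was a nice puppet", return_tensors="pt")
+#     with torch.no_grad():
+#         outputs = model(**inputs)
+#
+#     # the answer span is the argmax of the start and end logits
+#     start = outputs.start_logits.argmax()
+#     end = outputs.end_logits.argmax()
+#     print(tokenizer.decode(inputs.input_ids[0, start : end + 1]))  # e.g. "a nice puppet"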
+
+__all__ = [
+ "BertForMaskedLM",
+ "BertForMultipleChoice",
+ "BertForNextSentencePrediction",
+ "BertForPreTraining",
+ "BertForQuestionAnswering",
+ "BertForSequenceClassification",
+ "BertForTokenClassification",
+ "BertLayer",
+ "BertLMHeadModel",
+ "BertModel",
+ "BertPreTrainedModel",
+ "load_tf_weights_in_bert",
+]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/bert/modeling_flax_bert.py b/janus/lib/python3.10/site-packages/transformers/models/bert/modeling_flax_bert.py
new file mode 100644
index 0000000000000000000000000000000000000000..83358c86bd280dc171f3d40d9b9b9e1dec93aa43
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/bert/modeling_flax_bert.py
@@ -0,0 +1,1727 @@
+# coding=utf-8
+# Copyright 2021 The Google Flax Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Callable, Optional, Tuple
+
+import flax
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
+from flax.linen import combine_masks, make_causal_mask
+from flax.linen import partitioning as nn_partitioning
+from flax.linen.attention import dot_product_attention_weights
+from flax.traverse_util import flatten_dict, unflatten_dict
+from jax import lax
+
+from ...modeling_flax_outputs import (
+ FlaxBaseModelOutputWithPastAndCrossAttentions,
+ FlaxBaseModelOutputWithPooling,
+ FlaxBaseModelOutputWithPoolingAndCrossAttentions,
+ FlaxCausalLMOutputWithCrossAttentions,
+ FlaxMaskedLMOutput,
+ FlaxMultipleChoiceModelOutput,
+ FlaxNextSentencePredictorOutput,
+ FlaxQuestionAnsweringModelOutput,
+ FlaxSequenceClassifierOutput,
+ FlaxTokenClassifierOutput,
+)
+from ...modeling_flax_utils import (
+ ACT2FN,
+ FlaxPreTrainedModel,
+ append_call_sample_docstring,
+ append_replace_return_docstrings,
+ overwrite_call_docstring,
+)
+from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_bert import BertConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "google-bert/bert-base-uncased"
+_CONFIG_FOR_DOC = "BertConfig"
+
+remat = nn_partitioning.remat
+
+
+@flax.struct.dataclass
+class FlaxBertForPreTrainingOutput(ModelOutput):
+ """
+ Output type of [`FlaxBertForPreTraining`].
+
+ Args:
+ prediction_logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ seq_relationship_logits (`jnp.ndarray` of shape `(batch_size, 2)`):
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
+ before SoftMax).
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ prediction_logits: jnp.ndarray = None
+ seq_relationship_logits: jnp.ndarray = None
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
+ attentions: Optional[Tuple[jnp.ndarray]] = None
+
+
+BERT_START_DOCSTRING = r"""
+
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading, saving and converting weights from PyTorch models).
+
+ This model is also a
+ [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
+ a regular Flax linen Module and refer to the Flax documentation for all matters related to general usage and
+ behavior.
+
+ Finally, this model supports inherent JAX features such as:
+
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
+
+ Parameters:
+ config ([`BertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
+ `jax.numpy.bfloat16` (on TPUs).
+
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
+ specified all the computation will be performed with the given `dtype`.
+
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
+ parameters.**
+
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
+ [`~FlaxPreTrainedModel.to_bf16`].
+
+"""
+
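+# Illustrative sketch (not part of the upstream file): the `dtype` argument documented above only controls
+# the computation dtype, e.g. for half-precision inference on TPU:
+#
+#     import jax.numpy as jnp
+#     from transformers import FlaxBertModel
+#
+#     model = FlaxBertModel.from_pretrained("google-bert/bert-base-uncased", dtype=jnp.bfloat16)
+#     # parameters stay float32 unless converted explicitly, e.g. params = model.to_bf16(model.params)
+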
+BERT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`numpy.ndarray` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`numpy.ndarray` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+ head_mask (`numpy.ndarray` of shape `({0})`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+
+"""
+
+
+class FlaxBertEmbeddings(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ config: BertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.word_embeddings = nn.Embed(
+ self.config.vocab_size,
+ self.config.hidden_size,
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ dtype=self.dtype,
+ )
+ self.position_embeddings = nn.Embed(
+ self.config.max_position_embeddings,
+ self.config.hidden_size,
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ dtype=self.dtype,
+ )
+ self.token_type_embeddings = nn.Embed(
+ self.config.type_vocab_size,
+ self.config.hidden_size,
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ dtype=self.dtype,
+ )
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
+
+ def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True):
+ # Embed
+ inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
+ position_embeds = self.position_embeddings(position_ids.astype("i4"))
+ token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4"))
+
+ # Sum all embeddings
+ hidden_states = inputs_embeds + token_type_embeddings + position_embeds
+
+ # Layer Norm
+ hidden_states = self.LayerNorm(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ return hidden_states
+
+
+class FlaxBertSelfAttention(nn.Module):
+ config: BertConfig
+ causal: bool = False
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.head_dim = self.config.hidden_size // self.config.num_attention_heads
+ if self.config.hidden_size % self.config.num_attention_heads != 0:
+ raise ValueError(
+ "`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads` "
+ " : {self.config.num_attention_heads}"
+ )
+
+ self.query = nn.Dense(
+ self.config.hidden_size,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ )
+ self.key = nn.Dense(
+ self.config.hidden_size,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ )
+ self.value = nn.Dense(
+ self.config.hidden_size,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ )
+
+ if self.causal:
+ self.causal_mask = make_causal_mask(
+ jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
+ )
+
+ def _split_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim))
+
+ def _merge_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,))
+
+ @nn.compact
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention._concatenate_to_cache
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
+ """
+ This function takes projected key, value states from a single input token and concatenates the states to cached
+ states from previous steps. This function is slightly adapted from the official Flax repository:
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
+ """
+ # detect if we're initializing by absence of existing cache data.
+ is_initialized = self.has_variable("cache", "cached_key")
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
+
+ if is_initialized:
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
+ # update key, value caches with our new 1d spatial slices
+ cur_index = cache_index.value
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
+ cached_key.value = key
+ cached_value.value = value
+ num_updated_cache_vectors = query.shape[1]
+ cache_index.value = cache_index.value + num_updated_cache_vectors
+ # causal mask for cached decoder self-attention: our single query position should only attend to those
+ # key positions that have already been generated and cached, not the remaining zero elements.
+ pad_mask = jnp.broadcast_to(
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
+ )
+ attention_mask = combine_masks(pad_mask, attention_mask)
+ return key, value, attention_mask
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ key_value_states: Optional[jnp.ndarray] = None,
+ init_cache: bool = False,
+ deterministic=True,
+ output_attentions: bool = False,
+ ):
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+ batch_size = hidden_states.shape[0]
+
+ # get query proj
+ query_states = self.query(hidden_states)
+ # get key, value proj
+ if is_cross_attention:
+ # cross_attentions
+ key_states = self.key(key_value_states)
+ value_states = self.value(key_value_states)
+ else:
+ # self_attention
+ key_states = self.key(hidden_states)
+ value_states = self.value(hidden_states)
+
+ query_states = self._split_heads(query_states)
+ key_states = self._split_heads(key_states)
+ value_states = self._split_heads(value_states)
+
+ # handle cache prepare causal attention mask
+ if self.causal:
+ query_length, key_length = query_states.shape[1], key_states.shape[1]
+ if self.has_variable("cache", "cached_key"):
+ mask_shift = self.variables["cache"]["cache_index"]
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
+ causal_mask = lax.dynamic_slice(
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
+ )
+ else:
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
+
+ # combine masks if needed
+ if attention_mask is not None and self.causal:
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
+ attention_mask = combine_masks(attention_mask, causal_mask)
+ elif self.causal:
+ attention_mask = causal_mask
+ elif attention_mask is not None:
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
+
+ # During fast autoregressive decoding, we feed one position at a time,
+ # and cache the keys and values step by step.
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
+ key_states, value_states, attention_mask = self._concatenate_to_cache(
+ key_states, value_states, query_states, attention_mask
+ )
+
+ # Convert the boolean attention mask to an attention bias.
+ if attention_mask is not None:
+ # attention mask in the form of attention bias
+ attention_bias = lax.select(
+ attention_mask > 0,
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
+ )
+ else:
+ attention_bias = None
+
+ dropout_rng = None
+ if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
+ dropout_rng = self.make_rng("dropout")
+
+ attn_weights = dot_product_attention_weights(
+ query_states,
+ key_states,
+ bias=attention_bias,
+ dropout_rng=dropout_rng,
+ dropout_rate=self.config.attention_probs_dropout_prob,
+ broadcast_dropout=True,
+ deterministic=deterministic,
+ dtype=self.dtype,
+ precision=None,
+ )
+
+ # Mask heads if we want to
+ if layer_head_mask is not None:
+ attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask)
+
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
+ attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))
+
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
+ return outputs
+
+
+class FlaxBertSelfOutput(nn.Module):
+ config: BertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dense = nn.Dense(
+ self.config.hidden_size,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ dtype=self.dtype,
+ )
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
+
+ def __call__(self, hidden_states, input_tensor, deterministic: bool = True):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class FlaxBertAttention(nn.Module):
+ config: BertConfig
+ causal: bool = False
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.self = FlaxBertSelfAttention(self.config, causal=self.causal, dtype=self.dtype)
+ self.output = FlaxBertSelfOutput(self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ key_value_states=None,
+ init_cache=False,
+ deterministic=True,
+ output_attentions: bool = False,
+ ):
+ # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length)
+ # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable
+ # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length)
+ attn_outputs = self.self(
+ hidden_states,
+ attention_mask,
+ layer_head_mask=layer_head_mask,
+ key_value_states=key_value_states,
+ init_cache=init_cache,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ )
+ attn_output = attn_outputs[0]
+ hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_outputs[1],)
+
+ return outputs
+
+
+class FlaxBertIntermediate(nn.Module):
+ config: BertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dense = nn.Dense(
+ self.config.intermediate_size,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ dtype=self.dtype,
+ )
+ self.activation = ACT2FN[self.config.hidden_act]
+
+ def __call__(self, hidden_states):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+class FlaxBertOutput(nn.Module):
+ config: BertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dense = nn.Dense(
+ self.config.hidden_size,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ dtype=self.dtype,
+ )
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
+
+ def __call__(self, hidden_states, attention_output, deterministic: bool = True):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ hidden_states = self.LayerNorm(hidden_states + attention_output)
+ return hidden_states
+
+
+class FlaxBertLayer(nn.Module):
+ config: BertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.attention = FlaxBertAttention(self.config, causal=self.config.is_decoder, dtype=self.dtype)
+ self.intermediate = FlaxBertIntermediate(self.config, dtype=self.dtype)
+ self.output = FlaxBertOutput(self.config, dtype=self.dtype)
+ if self.config.add_cross_attention:
+ self.crossattention = FlaxBertAttention(self.config, causal=False, dtype=self.dtype)
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ init_cache: bool = False,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ ):
+ # Self Attention
+ attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ layer_head_mask=layer_head_mask,
+ init_cache=init_cache,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ )
+ attention_output = attention_outputs[0]
+
+ # Cross-Attention Block
+ if encoder_hidden_states is not None:
+ cross_attention_outputs = self.crossattention(
+ attention_output,
+ attention_mask=encoder_attention_mask,
+ layer_head_mask=layer_head_mask,
+ key_value_states=encoder_hidden_states,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ )
+ attention_output = cross_attention_outputs[0]
+
+ hidden_states = self.intermediate(attention_output)
+ hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attention_outputs[1],)
+ if encoder_hidden_states is not None:
+ outputs += (cross_attention_outputs[1],)
+ return outputs
+
+
+class FlaxBertLayerCollection(nn.Module):
+ config: BertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ if self.gradient_checkpointing:
+ FlaxBertCheckpointLayer = remat(FlaxBertLayer, static_argnums=(5, 6, 7))
+ self.layers = [
+ FlaxBertCheckpointLayer(self.config, name=str(i), dtype=self.dtype)
+ for i in range(self.config.num_hidden_layers)
+ ]
+ else:
+ self.layers = [
+ FlaxBertLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)
+ ]
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ init_cache: bool = False,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ all_attentions = () if output_attentions else None
+ all_hidden_states = () if output_hidden_states else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+
+ # Check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ if head_mask.shape[0] != (len(self.layers)):
+ raise ValueError(
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for "
+ f" {head_mask.shape[0]}."
+ )
+
+ for i, layer in enumerate(self.layers):
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ layer_outputs = layer(
+ hidden_states,
+ attention_mask,
+ head_mask[i] if head_mask is not None else None,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ init_cache,
+ deterministic,
+ output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions += (layer_outputs[1],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions += (layer_outputs[2],)
+
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions)
+
+ if not return_dict:
+ return tuple(v for v in outputs if v is not None)
+
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+class FlaxBertEncoder(nn.Module):
+ config: BertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.layer = FlaxBertLayerCollection(
+ self.config,
+ dtype=self.dtype,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ init_cache: bool = False,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ return self.layer(
+ hidden_states,
+ attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ init_cache=init_cache,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+
+class FlaxBertPooler(nn.Module):
+ config: BertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dense = nn.Dense(
+ self.config.hidden_size,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ dtype=self.dtype,
+ )
+
+ def __call__(self, hidden_states):
+ cls_hidden_state = hidden_states[:, 0]
+ cls_hidden_state = self.dense(cls_hidden_state)
+ return nn.tanh(cls_hidden_state)
+
+
+class FlaxBertPredictionHeadTransform(nn.Module):
+ config: BertConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype)
+ self.activation = ACT2FN[self.config.hidden_act]
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
+
+ def __call__(self, hidden_states):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ return self.LayerNorm(hidden_states)
+
+
+class FlaxBertLMPredictionHead(nn.Module):
+ config: BertConfig
+ dtype: jnp.dtype = jnp.float32
+ bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros
+
+ def setup(self):
+ self.transform = FlaxBertPredictionHeadTransform(self.config, dtype=self.dtype)
+ self.decoder = nn.Dense(self.config.vocab_size, dtype=self.dtype, use_bias=False)
+ self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,))
+
+ def __call__(self, hidden_states, shared_embedding=None):
+ hidden_states = self.transform(hidden_states)
+
+ if shared_embedding is not None:
+ hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
+ else:
+ hidden_states = self.decoder(hidden_states)
+
+ bias = jnp.asarray(self.bias, self.dtype)
+ hidden_states += bias
+ return hidden_states
+
+
+class FlaxBertOnlyMLMHead(nn.Module):
+ config: BertConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.predictions = FlaxBertLMPredictionHead(self.config, dtype=self.dtype)
+
+ def __call__(self, hidden_states, shared_embedding=None):
+ hidden_states = self.predictions(hidden_states, shared_embedding=shared_embedding)
+ return hidden_states
+
+
+class FlaxBertOnlyNSPHead(nn.Module):
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.seq_relationship = nn.Dense(2, dtype=self.dtype)
+
+ def __call__(self, pooled_output):
+ return self.seq_relationship(pooled_output)
+
+
+class FlaxBertPreTrainingHeads(nn.Module):
+ config: BertConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.predictions = FlaxBertLMPredictionHead(self.config, dtype=self.dtype)
+ self.seq_relationship = nn.Dense(2, dtype=self.dtype)
+
+ def __call__(self, hidden_states, pooled_output, shared_embedding=None):
+ prediction_scores = self.predictions(hidden_states, shared_embedding=shared_embedding)
+ seq_relationship_score = self.seq_relationship(pooled_output)
+ return prediction_scores, seq_relationship_score
+
+
+class FlaxBertPreTrainedModel(FlaxPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = BertConfig
+ base_model_prefix = "bert"
+ module_class: nn.Module = None
+
+ def __init__(
+ self,
+ config: BertConfig,
+ input_shape: Tuple = (1, 1),
+ seed: int = 0,
+ dtype: jnp.dtype = jnp.float32,
+ _do_init: bool = True,
+ gradient_checkpointing: bool = False,
+ **kwargs,
+ ):
+ module = self.module_class(
+ config=config,
+ dtype=dtype,
+ gradient_checkpointing=gradient_checkpointing,
+ **kwargs,
+ )
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
+
+ def enable_gradient_checkpointing(self):
+ self._module = self.module_class(
+ config=self.config,
+ dtype=self.dtype,
+ gradient_checkpointing=True,
+ )
+
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
+ # init input tensors
+ input_ids = jnp.zeros(input_shape, dtype="i4")
+ token_type_ids = jnp.zeros_like(input_ids)
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
+ attention_mask = jnp.ones_like(input_ids)
+ head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))
+
+ params_rng, dropout_rng = jax.random.split(rng)
+ rngs = {"params": params_rng, "dropout": dropout_rng}
+
+ if self.config.add_cross_attention:
+ encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,))
+ encoder_attention_mask = attention_mask
+ module_init_outputs = self.module.init(
+ rngs,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ return_dict=False,
+ )
+ else:
+ module_init_outputs = self.module.init(
+ rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False
+ )
+
+ random_params = module_init_outputs["params"]
+
+ if params is not None:
+ random_params = flatten_dict(unfreeze(random_params))
+ params = flatten_dict(unfreeze(params))
+ for missing_key in self._missing_keys:
+ params[missing_key] = random_params[missing_key]
+ self._missing_keys = set()
+ return freeze(unflatten_dict(params))
+ else:
+ return random_params
+
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderPreTrainedModel.init_cache
+ def init_cache(self, batch_size, max_length):
+ r"""
+ Args:
+ batch_size (`int`):
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
+ max_length (`int`):
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
+ cache.
+ """
+ # init input variables to retrieve cache
+ input_ids = jnp.ones((batch_size, max_length), dtype="i4")
+ attention_mask = jnp.ones_like(input_ids, dtype="i4")
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
+
+ init_variables = self.module.init(
+ jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
+ )
+ return unfreeze(init_variables["cache"])
+
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ def __call__(
+ self,
+ input_ids,
+ attention_mask=None,
+ token_type_ids=None,
+ position_ids=None,
+ head_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ params: dict = None,
+ dropout_rng: jax.random.PRNGKey = None,
+ train: bool = False,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ past_key_values: dict = None,
+ ):
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ # init input tensors if not passed
+ if token_type_ids is None:
+ token_type_ids = jnp.zeros_like(input_ids)
+
+ if position_ids is None:
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
+
+ if attention_mask is None:
+ attention_mask = jnp.ones_like(input_ids)
+
+ if head_mask is None:
+ head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ inputs = {"params": params or self.params}
+
+ if self.config.add_cross_attention:
+ # If past_key_values are passed, the cache is already initialized, so a private flag init_cache has to be
+ # passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can be
+ # changed by the FlaxBertAttention module.
+ if past_key_values:
+ inputs["cache"] = past_key_values
+ mutable = ["cache"]
+ else:
+ mutable = False
+
+ outputs = self.module.apply(
+ inputs,
+ jnp.array(input_ids, dtype="i4"),
+ jnp.array(attention_mask, dtype="i4"),
+ token_type_ids=jnp.array(token_type_ids, dtype="i4"),
+ position_ids=jnp.array(position_ids, dtype="i4"),
+ head_mask=jnp.array(head_mask, dtype="i4"),
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ deterministic=not train,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ rngs=rngs,
+ mutable=mutable,
+ )
+
+ # add updated cache to model output
+ if past_key_values is not None and return_dict:
+ outputs, past_key_values = outputs
+ outputs["past_key_values"] = unfreeze(past_key_values["cache"])
+ return outputs
+ elif past_key_values is not None and not return_dict:
+ outputs, past_key_values = outputs
+ outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
+
+ else:
+ outputs = self.module.apply(
+ inputs,
+ jnp.array(input_ids, dtype="i4"),
+ jnp.array(attention_mask, dtype="i4"),
+ token_type_ids=jnp.array(token_type_ids, dtype="i4"),
+ position_ids=jnp.array(position_ids, dtype="i4"),
+ head_mask=jnp.array(head_mask, dtype="i4"),
+ deterministic=not train,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ rngs=rngs,
+ )
+
+ return outputs
+
+
+class FlaxBertModule(nn.Module):
+ config: BertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ add_pooling_layer: bool = True
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.embeddings = FlaxBertEmbeddings(self.config, dtype=self.dtype)
+ self.encoder = FlaxBertEncoder(
+ self.config,
+ dtype=self.dtype,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+ self.pooler = FlaxBertPooler(self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids: Optional[jnp.ndarray] = None,
+ position_ids: Optional[jnp.ndarray] = None,
+ head_mask: Optional[jnp.ndarray] = None,
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ init_cache: bool = False,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ # make sure `token_type_ids` is correctly initialized when not passed
+ if token_type_ids is None:
+ token_type_ids = jnp.zeros_like(input_ids)
+
+ # make sure `position_ids` is correctly initialized when not passed
+ if position_ids is None:
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
+
+ hidden_states = self.embeddings(
+ input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic
+ )
+ outputs = self.encoder(
+ hidden_states,
+ attention_mask,
+ head_mask=head_mask,
+ deterministic=deterministic,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ init_cache=init_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = outputs[0]
+ pooled = self.pooler(hidden_states) if self.add_pooling_layer else None
+
+ if not return_dict:
+ # if pooled is None, don't return it
+ if pooled is None:
+ return (hidden_states,) + outputs[1:]
+ return (hidden_states, pooled) + outputs[1:]
+
+ return FlaxBaseModelOutputWithPoolingAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ pooler_output=pooled,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
+ BERT_START_DOCSTRING,
+)
+class FlaxBertModel(FlaxBertPreTrainedModel):
+ module_class = FlaxBertModule
+
+
+append_call_sample_docstring(FlaxBertModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutputWithPooling, _CONFIG_FOR_DOC)
+
+
+class FlaxBertForPreTrainingModule(nn.Module):
+ config: BertConfig
+ dtype: jnp.dtype = jnp.float32
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.bert = FlaxBertModule(
+ config=self.config,
+ dtype=self.dtype,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+ self.cls = FlaxBertPreTrainingHeads(config=self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ # Model
+ outputs = self.bert(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
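+        # when word embeddings are tied, the input embedding matrix is reused as the output projection of the MLM head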
+ if self.config.tie_word_embeddings:
+ shared_embedding = self.bert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
+ else:
+ shared_embedding = None
+
+ hidden_states = outputs[0]
+ pooled_output = outputs[1]
+
+ prediction_scores, seq_relationship_score = self.cls(
+ hidden_states, pooled_output, shared_embedding=shared_embedding
+ )
+
+ if not return_dict:
+ return (prediction_scores, seq_relationship_score) + outputs[2:]
+
+ return FlaxBertForPreTrainingOutput(
+ prediction_logits=prediction_scores,
+ seq_relationship_logits=seq_relationship_score,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
+ sentence prediction (classification)` head.
+ """,
+ BERT_START_DOCSTRING,
+)
+class FlaxBertForPreTraining(FlaxBertPreTrainedModel):
+ module_class = FlaxBertForPreTrainingModule
+
+
+FLAX_BERT_FOR_PRETRAINING_DOCSTRING = """
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxBertForPreTraining
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
+ >>> model = FlaxBertForPreTraining.from_pretrained("google-bert/bert-base-uncased")
+
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
+ >>> outputs = model(**inputs)
+
+ >>> prediction_logits = outputs.prediction_logits
+ >>> seq_relationship_logits = outputs.seq_relationship_logits
+ ```
+"""
+
+overwrite_call_docstring(
+ FlaxBertForPreTraining,
+ BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + FLAX_BERT_FOR_PRETRAINING_DOCSTRING,
+)
+append_replace_return_docstrings(
+ FlaxBertForPreTraining, output_type=FlaxBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC
+)
+
+
+class FlaxBertForMaskedLMModule(nn.Module):
+ config: BertConfig
+ dtype: jnp.dtype = jnp.float32
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.bert = FlaxBertModule(
+ config=self.config,
+ add_pooling_layer=False,
+ dtype=self.dtype,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+ self.cls = FlaxBertOnlyMLMHead(config=self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ # Model
+ outputs = self.bert(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ if self.config.tie_word_embeddings:
+ shared_embedding = self.bert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
+ else:
+ shared_embedding = None
+
+ # Compute the prediction scores
+ logits = self.cls(hidden_states, shared_embedding=shared_embedding)
+
+ if not return_dict:
+ return (logits,) + outputs[1:]
+
+ return FlaxMaskedLMOutput(
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings("""Bert Model with a `language modeling` head on top.""", BERT_START_DOCSTRING)
+class FlaxBertForMaskedLM(FlaxBertPreTrainedModel):
+ module_class = FlaxBertForMaskedLMModule
+
+
+append_call_sample_docstring(FlaxBertForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC)
+
+
+class FlaxBertForNextSentencePredictionModule(nn.Module):
+ config: BertConfig
+ dtype: jnp.dtype = jnp.float32
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.bert = FlaxBertModule(
+ config=self.config,
+ dtype=self.dtype,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+ self.cls = FlaxBertOnlyNSPHead(dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ # Model
+ outputs = self.bert(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+ seq_relationship_scores = self.cls(pooled_output)
+
+ if not return_dict:
+ return (seq_relationship_scores,) + outputs[2:]
+
+ return FlaxNextSentencePredictorOutput(
+ logits=seq_relationship_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """Bert Model with a `next sentence prediction (classification)` head on top.""",
+ BERT_START_DOCSTRING,
+)
+class FlaxBertForNextSentencePrediction(FlaxBertPreTrainedModel):
+ module_class = FlaxBertForNextSentencePredictionModule
+
+
+FLAX_BERT_FOR_NEXT_SENT_PRED_DOCSTRING = """
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxBertForNextSentencePrediction
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
+ >>> model = FlaxBertForNextSentencePrediction.from_pretrained("google-bert/bert-base-uncased")
+
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="jax")
+
+ >>> outputs = model(**encoding)
+ >>> logits = outputs.logits
+ >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
+ ```
+"""
+
+
+overwrite_call_docstring(
+ FlaxBertForNextSentencePrediction,
+ BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + FLAX_BERT_FOR_NEXT_SENT_PRED_DOCSTRING,
+)
+append_replace_return_docstrings(
+ FlaxBertForNextSentencePrediction, output_type=FlaxNextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC
+)
+
+
+class FlaxBertForSequenceClassificationModule(nn.Module):
+ config: BertConfig
+ dtype: jnp.dtype = jnp.float32
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.bert = FlaxBertModule(
+ config=self.config,
+ dtype=self.dtype,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+ classifier_dropout = (
+ self.config.classifier_dropout
+ if self.config.classifier_dropout is not None
+ else self.config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(rate=classifier_dropout)
+ self.classifier = nn.Dense(
+ self.config.num_labels,
+ dtype=self.dtype,
+ )
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ # Model
+ outputs = self.bert(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
+ logits = self.classifier(pooled_output)
+
+ if not return_dict:
+ return (logits,) + outputs[2:]
+
+ return FlaxSequenceClassifierOutput(
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
+ output) e.g. for GLUE tasks.
+ """,
+ BERT_START_DOCSTRING,
+)
+class FlaxBertForSequenceClassification(FlaxBertPreTrainedModel):
+ module_class = FlaxBertForSequenceClassificationModule
+
+
+append_call_sample_docstring(
+ FlaxBertForSequenceClassification,
+ _CHECKPOINT_FOR_DOC,
+ FlaxSequenceClassifierOutput,
+ _CONFIG_FOR_DOC,
+)
+
+
+class FlaxBertForMultipleChoiceModule(nn.Module):
+ config: BertConfig
+ dtype: jnp.dtype = jnp.float32
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.bert = FlaxBertModule(
+ config=self.config,
+ dtype=self.dtype,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
+ self.classifier = nn.Dense(1, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
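+        # inputs arrive as (batch_size, num_choices, seq_len); flatten the choice dimension so the encoder sees a
+        # regular 2D batch, then reshape the per-choice logits back to (batch_size, num_choices) below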
+ num_choices = input_ids.shape[1]
+ input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None
+ attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None
+ token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None
+ position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None
+
+ # Model
+ outputs = self.bert(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
+ logits = self.classifier(pooled_output)
+
+ reshaped_logits = logits.reshape(-1, num_choices)
+
+ if not return_dict:
+ return (reshaped_logits,) + outputs[2:]
+
+ return FlaxMultipleChoiceModelOutput(
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
+ softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ BERT_START_DOCSTRING,
+)
+class FlaxBertForMultipleChoice(FlaxBertPreTrainedModel):
+ module_class = FlaxBertForMultipleChoiceModule
+
+
+overwrite_call_docstring(
+ FlaxBertForMultipleChoice, BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
+)
+append_call_sample_docstring(
+ FlaxBertForMultipleChoice, _CHECKPOINT_FOR_DOC, FlaxMultipleChoiceModelOutput, _CONFIG_FOR_DOC
+)
+
+
+class FlaxBertForTokenClassificationModule(nn.Module):
+ config: BertConfig
+ dtype: jnp.dtype = jnp.float32
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.bert = FlaxBertModule(
+ config=self.config,
+ dtype=self.dtype,
+ add_pooling_layer=False,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+ classifier_dropout = (
+ self.config.classifier_dropout
+ if self.config.classifier_dropout is not None
+ else self.config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(rate=classifier_dropout)
+ self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ # Model
+ outputs = self.bert(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ logits = self.classifier(hidden_states)
+
+ if not return_dict:
+ return (logits,) + outputs[1:]
+
+ return FlaxTokenClassifierOutput(
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ BERT_START_DOCSTRING,
+)
+class FlaxBertForTokenClassification(FlaxBertPreTrainedModel):
+ module_class = FlaxBertForTokenClassificationModule
+
+
+append_call_sample_docstring(
+ FlaxBertForTokenClassification, _CHECKPOINT_FOR_DOC, FlaxTokenClassifierOutput, _CONFIG_FOR_DOC
+)
+
+
+class FlaxBertForQuestionAnsweringModule(nn.Module):
+ config: BertConfig
+ dtype: jnp.dtype = jnp.float32
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.bert = FlaxBertModule(
+ config=self.config,
+ dtype=self.dtype,
+ add_pooling_layer=False,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+ self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ # Model
+ outputs = self.bert(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+
+ logits = self.qa_outputs(hidden_states)
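+        # the QA head emits config.num_labels values per token (expected to be 2), split into start and end logits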
+ start_logits, end_logits = jnp.split(logits, self.config.num_labels, axis=-1)
+ start_logits = start_logits.squeeze(-1)
+ end_logits = end_logits.squeeze(-1)
+
+ if not return_dict:
+ return (start_logits, end_logits) + outputs[1:]
+
+ return FlaxQuestionAnsweringModelOutput(
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+    Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
+    layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ BERT_START_DOCSTRING,
+)
+class FlaxBertForQuestionAnswering(FlaxBertPreTrainedModel):
+ module_class = FlaxBertForQuestionAnsweringModule
+
+
+append_call_sample_docstring(
+ FlaxBertForQuestionAnswering,
+ _CHECKPOINT_FOR_DOC,
+ FlaxQuestionAnsweringModelOutput,
+ _CONFIG_FOR_DOC,
+)
+
+
+class FlaxBertForCausalLMModule(nn.Module):
+ config: BertConfig
+ dtype: jnp.dtype = jnp.float32
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.bert = FlaxBertModule(
+ config=self.config,
+ add_pooling_layer=False,
+ dtype=self.dtype,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+ self.cls = FlaxBertOnlyMLMHead(config=self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ position_ids,
+ token_type_ids: Optional[jnp.ndarray] = None,
+ head_mask: Optional[jnp.ndarray] = None,
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ init_cache: bool = False,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ # Model
+ outputs = self.bert(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ init_cache=init_cache,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ if self.config.tie_word_embeddings:
+ shared_embedding = self.bert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
+ else:
+ shared_embedding = None
+
+ # Compute the prediction scores
+ logits = self.cls(hidden_states, shared_embedding=shared_embedding)
+
+ if not return_dict:
+ return (logits,) + outputs[1:]
+
+ return FlaxCausalLMOutputWithCrossAttentions(
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+    Bert Model with a language modeling head on top (a linear layer on top of the hidden-states output) e.g. for
+ autoregressive tasks.
+ """,
+ BERT_START_DOCSTRING,
+)
+class FlaxBertForCausalLM(FlaxBertPreTrainedModel):
+ module_class = FlaxBertForCausalLMModule
+
+ def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
+ # initializing the cache
+ batch_size, seq_length = input_ids.shape
+
+ past_key_values = self.init_cache(batch_size, max_length)
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
+ # But since the decoder uses a causal mask, those positions are masked anyway.
+ # Thus, we can create a single static attention_mask here, which is more efficient for compilation
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
+ if attention_mask is not None:
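+            # cumulative sum over the mask gives positions that do not advance on padded (0) entries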
+ position_ids = attention_mask.cumsum(axis=-1) - 1
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
+ else:
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
+
+ return {
+ "past_key_values": past_key_values,
+ "attention_mask": extended_attention_mask,
+ "position_ids": position_ids,
+ }
+
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
+ model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
+ return model_kwargs
+
+
+append_call_sample_docstring(
+ FlaxBertForCausalLM,
+ _CHECKPOINT_FOR_DOC,
+ FlaxCausalLMOutputWithCrossAttentions,
+ _CONFIG_FOR_DOC,
+)
+
+
+__all__ = [
+ "FlaxBertForCausalLM",
+ "FlaxBertForMaskedLM",
+ "FlaxBertForMultipleChoice",
+ "FlaxBertForNextSentencePrediction",
+ "FlaxBertForPreTraining",
+ "FlaxBertForQuestionAnswering",
+ "FlaxBertForSequenceClassification",
+ "FlaxBertForTokenClassification",
+ "FlaxBertModel",
+ "FlaxBertPreTrainedModel",
+]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/bert/tokenization_bert_fast.py b/janus/lib/python3.10/site-packages/transformers/models/bert/tokenization_bert_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a89e6053b988f5b9d8131304c7b3e7e74dba5fd
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/bert/tokenization_bert_fast.py
@@ -0,0 +1,175 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Fast Tokenization classes for Bert."""
+
+import json
+from typing import List, Optional, Tuple
+
+from tokenizers import normalizers
+
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import logging
+from .tokenization_bert import BertTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
+
+
+class BertTokenizerFast(PreTrainedTokenizerFast):
+ r"""
+ Construct a "fast" BERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ clean_text (`bool`, *optional*, defaults to `True`):
+            Whether or not to clean the text before tokenization by removing any control characters and replacing all
+            whitespace characters with a standard space.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
+ issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
+ The prefix for subwords.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ slow_tokenizer_class = BertTokenizer
+
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ do_lower_case=True,
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ **kwargs,
+ ):
+ super().__init__(
+ vocab_file,
+ tokenizer_file=tokenizer_file,
+ do_lower_case=do_lower_case,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ **kwargs,
+ )
+
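+        # if the serialized normalizer in tokenizer.json disagrees with the arguments passed here, rebuild it so the
+        # requested lowercasing, accent stripping and Chinese-character handling actually take effect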
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
+ if (
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
+ ):
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
+ normalizer_state["lowercase"] = do_lower_case
+ normalizer_state["strip_accents"] = strip_accents
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
+
+ self.do_lower_case = do_lower_case
+
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+ """
+        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A BERT sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+
+ if token_ids_1 is not None:
+ output += token_ids_1 + [self.sep_token_id]
+
+ return output
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
+ pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+ return tuple(files)
+
+
+__all__ = ["BertTokenizerFast"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/blip/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/blip/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f384765d772183a5d24087117d066aaf866a6f83
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/blip/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_blip_text.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_blip_text.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..415fbe9543157c098da8441bf4722e62e3f8a8df
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_blip_text.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_tf_blip.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_tf_blip.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7176ea95ba3b8e48cee0a5d4adf6dbe5b4489629
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_tf_blip.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_tf_blip_text.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_tf_blip_text.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..05903eb4b45e34d0ac77cc97d02418436356c365
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_tf_blip_text.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/blip/image_processing_blip.py b/janus/lib/python3.10/site-packages/transformers/models/blip/image_processing_blip.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f7683d08d189e7cce1be2811b120a7598f18818
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/blip/image_processing_blip.py
@@ -0,0 +1,297 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for BLIP."""
+
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import convert_to_rgb, resize, to_channel_dimension_format
+from ...image_utils import (
+ OPENAI_CLIP_MEAN,
+ OPENAI_CLIP_STD,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging
+
+
+if is_vision_available():
+ import PIL
+
+
+logger = logging.get_logger(__name__)
+
+
+class BlipImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a BLIP image processor.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
+ `do_resize` parameter in the `preprocess` method.
+ size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`):
+ Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
+ method.
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
+ Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
+ overridden by the `resample` parameter in the `preprocess` method.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
+ `do_rescale` parameter in the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
+ overridden by the `rescale_factor` parameter in the `preprocess` method.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+            method.
+        image_mean (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
+            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+        image_std (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
+            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
+ Whether to convert the image to RGB.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_normalize: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_convert_rgb: bool = True,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ size = size if size is not None else {"height": 384, "width": 384}
+ size = get_size_dict(size, default_to_square=True)
+
+ self.do_resize = do_resize
+ self.size = size
+ self.resample = resample
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+ self.do_normalize = do_normalize
+ self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
+ self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
+ self.do_convert_rgb = do_convert_rgb
+
+ # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize an image to `(size["height"], size["width"])`.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
+ data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
+ image is used. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+
+ Returns:
+ `np.ndarray`: The resized image.
+ """
+ size = get_size_dict(size)
+ if "height" not in size or "width" not in size:
+ raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
+ output_size = (size["height"], size["width"])
+ return resize(
+ image,
+ size=output_size,
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+
+ @filter_out_non_signature_kwargs()
+ def preprocess(
+ self,
+ images: ImageInput,
+ do_resize: Optional[bool] = None,
+ size: Optional[Dict[str, int]] = None,
+ resample: PILImageResampling = None,
+ do_rescale: Optional[bool] = None,
+ rescale_factor: Optional[float] = None,
+ do_normalize: Optional[bool] = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ do_convert_rgb: bool = None,
+ data_format: ChannelDimension = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> PIL.Image.Image:
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ images (`ImageInput`):
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+                Controls the size of the image after `resize`. The image is resized to `(size["height"], size["width"])`;
+                the aspect ratio is not preserved.
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
+ Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+                Whether to rescale the image values to the range [0, 1].
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean to normalize the image by if `do_normalize` is set to `True`.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
+ Whether to convert the image to RGB.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: Use the channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ resample = resample if resample is not None else self.resample
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
+
+ size = size if size is not None else self.size
+ size = get_size_dict(size, default_to_square=False)
+
+ images = make_list_of_images(images)
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+ # PIL RGBA images are converted to RGB
+ if do_convert_rgb:
+ images = [convert_to_rgb(image) for image in images]
+
+ # All transformations expect numpy arrays.
+ images = [to_numpy_array(image) for image in images]
+
+ if do_rescale and is_scaled_image(images[0]):
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ if do_resize:
+ images = [
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_rescale:
+ images = [
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_normalize:
+ images = [
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ images = [
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
+ ]
+
+ encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
+
+ return encoded_outputs
+
+
+__all__ = ["BlipImageProcessor"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/blip/modeling_blip.py b/janus/lib/python3.10/site-packages/transformers/models/blip/modeling_blip.py
new file mode 100644
index 0000000000000000000000000000000000000000..27dbbee6c671ee10e7af8b5e3c97bedd1a3bb4f1
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/blip/modeling_blip.py
@@ -0,0 +1,1596 @@
+# coding=utf-8
+# Copyright 2022 The Salesforce Team Authors and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch BLIP model."""
+
+import warnings
+from dataclasses import dataclass
+from typing import Any, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn.functional import normalize
+
+from ...activations import ACT2FN
+from ...generation import GenerationMixin
+from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+ torch_int,
+)
+from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig
+from .modeling_blip_text import BlipTextLMHeadModel, BlipTextModel
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "Salesforce/blip-vqa-base"
+
+
+# Copied from transformers.models.clip.modeling_clip.contrastive_loss
+def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
+ return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))
+
+
+# Copied from transformers.models.clip.modeling_clip.clip_loss with clip->blip
+def blip_loss(similarity: torch.Tensor) -> torch.Tensor:
+ caption_loss = contrastive_loss(similarity)
+ image_loss = contrastive_loss(similarity.t())
+ return (caption_loss + image_loss) / 2.0
+
+
+@dataclass
+class BlipForConditionalGenerationModelOutput(ModelOutput):
+ """
+    Adapted from the base class for vision model outputs, which also contains the image embeddings obtained by pooling
+    the last hidden states. This class also adds the loss term from the text decoder.
+
+ Args:
+        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+            Language modeling loss from the text decoder.
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`, *optional*):
+ Prediction scores of the language modeling head of the text decoder model.
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*):
+ The image embeddings obtained after applying the Vision Transformer model to the input image.
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[Tuple[torch.FloatTensor]] = None
+ logits: Optional[Tuple[torch.FloatTensor]] = None
+ image_embeds: Optional[torch.FloatTensor] = None
+ last_hidden_state: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+ @property
+ def decoder_logits(self):
+ warnings.warn(
+ "`decoder_logits` attribute is deprecated and will be removed in version 5 of Transformers."
+ " Please use the `logits` attribute to retrieve the final output instead.",
+ FutureWarning,
+ )
+ return self.logits
+
+
+@dataclass
+class BlipTextVisionModelOutput(ModelOutput):
+ """
+    Adapted from the base class for vision model outputs, which also contains the image embeddings obtained by pooling
+    the last hidden states. This class also adds the loss term from the text decoder.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+            Language modeling loss from the text decoder.
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
+ The image embeddings obtained by applying the projection layer to the pooler_output.
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ image_embeds: Optional[torch.FloatTensor] = None
+ last_hidden_state: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class BlipImageTextMatchingModelOutput(ModelOutput):
+ """
+    Adapted from the base class for vision model outputs, which also contains the image embeddings obtained by pooling
+    the last hidden states. This class also adds the loss term from the text decoder as well as the image-text
+    similarity scores.
+
+ Args:
+ itm_score (`torch.FloatTensor`):
+ The image-text similarity scores.
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+            Language modeling loss from the text decoder.
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
+ The image embeddings obtained by applying the projection layer to the pooler_output.
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ vision_pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*):
+            Last layer hidden-state of the vision-only branch of the model.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ question_embeds (`torch.FloatTensor`):
+ The question embeddings obtained by the text projection layer.
+ """
+
+ itm_score: Optional[torch.FloatTensor] = None
+ loss: Optional[torch.FloatTensor] = None
+ image_embeds: Optional[torch.FloatTensor] = None
+ last_hidden_state: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ vision_pooler_output: Optional[torch.FloatTensor] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ question_embeds: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class BlipOutput(ModelOutput):
+ """
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
+ Contrastive loss for image-text similarity.
+ logits_per_image:(`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
+ similarity scores.
+ logits_per_text:(`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
+ similarity scores.
+        text_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim)`):
+ The text embeddings obtained by applying the projection layer to the pooled output of [`BlipTextModel`].
+        image_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim)`):
+ The image embeddings obtained by applying the projection layer to the pooled output of [`BlipVisionModel`].
+ text_model_output(`BaseModelOutputWithPooling`):
+ The output of the [`BlipTextModel`].
+ vision_model_output(`BaseModelOutputWithPooling`):
+ The output of the [`BlipVisionModel`].
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits_per_image: torch.FloatTensor = None
+ logits_per_text: torch.FloatTensor = None
+ text_embeds: torch.FloatTensor = None
+ image_embeds: torch.FloatTensor = None
+ text_model_output: BaseModelOutputWithPooling = None
+ vision_model_output: BaseModelOutputWithPooling = None
+
+ def to_tuple(self) -> Tuple[Any]:
+ return tuple(
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
+ for k in self.keys()
+ )
+
+
+class BlipVisionEmbeddings(nn.Module):
+ def __init__(self, config: BlipVisionConfig):
+ super().__init__()
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.image_size = config.image_size
+ self.patch_size = config.patch_size
+
+ self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim))
+
+ self.patch_embedding = nn.Conv2d(
+ in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
+ )
+
+ self.num_patches = (self.image_size // self.patch_size) ** 2
+ self.num_positions = self.num_patches + 1
+
+ self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
+
+ def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
+ """
+ This method interpolates the pre-trained position encodings so that the model can be used on
+ higher-resolution images. It is also adapted to support torch.jit tracing.
+
+ Adapted from:
+ - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
+ - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
+ """
+
+ num_patches = embeddings.shape[1] - 1
+ num_positions = self.position_embedding.shape[1] - 1
+
+ # always interpolate when tracing to ensure the exported model works for dynamic input shapes
+ if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
+ return self.position_embedding
+
+ class_pos_embed = self.position_embedding[:, :1]
+ patch_pos_embed = self.position_embedding[:, 1:]
+
+ dim = embeddings.shape[-1]
+
+ new_height = height // self.patch_size
+ new_width = width // self.patch_size
+
+ sqrt_num_positions = torch_int(num_positions**0.5)
+ patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
+ patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
+
+ patch_pos_embed = nn.functional.interpolate(
+ patch_pos_embed,
+ size=(new_height, new_width),
+ mode="bicubic",
+ align_corners=False,
+ )
+
+ patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
+
+ return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
+
+ def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
+ batch_size, _, height, width = pixel_values.shape
+ target_dtype = self.patch_embedding.weight.dtype
+ patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = (batch_size, embed_dim, grid, grid)
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
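+ # embeddings: (batch_size, num_patches + 1, embed_dim) -- class token prepended to the patch tokens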
+ if interpolate_pos_encoding:
+ position_embedding = self.interpolate_pos_encoding(embeddings, height, width)
+ else:
+ position_embedding = self.position_embedding
+ embeddings = embeddings + position_embedding[:, : embeddings.size(1), :].to(target_dtype)
+ return embeddings
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPTextEmbeddings with CLIP->Blip
+class BlipTextEmbeddings(nn.Module):
+ def __init__(self, config: BlipTextConfig):
+ super().__init__()
+ embed_dim = config.hidden_size
+
+ self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
+ self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)
+
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ ) -> torch.Tensor:
+ seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
+
+ if position_ids is None:
+ position_ids = self.position_ids[:, :seq_length]
+
+ if inputs_embeds is None:
+ inputs_embeds = self.token_embedding(input_ids)
+
+ position_embeddings = self.position_embedding(position_ids)
+ embeddings = inputs_embeds + position_embeddings
+
+ return embeddings
+
+
+class BlipAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.embed_dim // self.num_heads
+ if self.head_dim * self.num_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
+ f" {self.num_heads})."
+ )
+ self.scale = self.head_dim**-0.5
+ self.dropout = nn.Dropout(config.attention_dropout)
+
+ self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim)
+
+ self.projection = nn.Linear(self.embed_dim, self.embed_dim)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ bsz, tgt_len, embed_dim = hidden_states.size()
+
+ mixed_qkv = (
+ self.qkv(hidden_states)
+ .reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads)
+ .permute(2, 0, 3, 1, 4)
+ )
+ query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]
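+ # mixed_qkv: (3, batch_size, num_heads, seq_len, head_dim); each of the query/key/value states is
+ # therefore (batch_size, num_heads, seq_len, head_dim)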
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))
+
+ attention_scores = attention_scores * self.scale
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3)
+
+ new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,)
+ context_layer = context_layer.reshape(new_context_layer_shape)
+
+ output = self.projection(context_layer)
+
+ outputs = (output, attention_probs) if output_attentions else (output, None)
+
+ return outputs
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Blip
+class BlipMLP(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.activation_fn = ACT2FN[config.hidden_act]
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+ hidden_states = self.fc2(hidden_states)
+ return hidden_states
+
+
+class BlipEncoderLayer(nn.Module):
+ def __init__(self, config: BlipConfig):
+ super().__init__()
+ self.embed_dim = config.hidden_size
+ self.self_attn = BlipAttention(config)
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+ self.mlp = BlipMLP(config)
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: torch.Tensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+
+ hidden_states = self.layer_norm1(hidden_states)
+ hidden_states, attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ head_mask=attention_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = hidden_states + residual
+ residual = hidden_states
+ hidden_states = self.layer_norm2(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+
+ hidden_states = hidden_states + residual
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+class BlipPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = BlipConfig
+ base_model_prefix = "blip"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["BlipEncoderLayer", "BlipTextEmbeddings"]
+ _skip_keys_device_placement = ["past_key_value"]
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ factor = self.config.initializer_range
+ if isinstance(module, nn.Conv2d) or isinstance(module, nn.Embedding) or isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=factor)
+ if hasattr(module, "bias") and module.bias is not None:
+ module.bias.data.zero_()
+
+ if isinstance(module, BlipVisionEmbeddings):
+ if hasattr(self.config, "vision_config"):
+ factor = self.config.vision_config.initializer_range
+ nn.init.trunc_normal_(
+ module.position_embedding,
+ mean=0.0,
+ std=factor,
+ )
+
+ nn.init.trunc_normal_(
+ module.class_embedding,
+ mean=0.0,
+ std=factor,
+ )
+
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, nn.Linear) and module.bias is not None:
+ module.bias.data.zero_()
+
+
+BLIP_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`BlipConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+BLIP_TEXT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoProcessor`]. See [`BlipProcessor.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+BLIP_VISION_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
+ [`BlipImageProcessor`]. See [`BlipImageProcessor.__call__`] for details.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
+ Whether to interpolate the pre-trained position encodings.
+"""
+
+BLIP_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoProcessor`]. See [`BlipProcessor.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
+ [`BlipImageProcessor`]. See [`BlipImageProcessor.__call__`] for details.
+ return_loss (`bool`, *optional*):
+ Whether or not to return the contrastive loss.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
+ Whether to interpolate the pre-trained position encodings.
+"""
+
+
+class BlipEncoder(nn.Module):
+ """
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
+ [`BlipEncoderLayer`].
+
+ Args:
+ config (`BlipConfig`):
+ The corresponding vision configuration for the `BlipEncoder`.
+ """
+
+ def __init__(self, config: BlipConfig):
+ super().__init__()
+ self.config = config
+ self.layers = nn.ModuleList([BlipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ inputs_embeds,
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Args:
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Embedded representation of the inputs. Should be float, not int tokens.
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ hidden_states = inputs_embeds
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+class BlipVisionModel(BlipPreTrainedModel):
+ main_input_name = "pixel_values"
+ config_class = BlipVisionConfig
+
+ def __init__(self, config: BlipVisionConfig):
+ super().__init__(config)
+ self.config = config
+ embed_dim = config.hidden_size
+
+ self.embeddings = BlipVisionEmbeddings(config)
+ self.encoder = BlipEncoder(config)
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
+
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=BlipVisionConfig)
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ interpolate_pos_encoding: bool = False,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ r"""
+ Returns:
+
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
+
+ encoder_outputs = self.encoder(
+ inputs_embeds=hidden_states,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = encoder_outputs[0]
+ last_hidden_state = self.post_layernorm(last_hidden_state)
+
+ pooled_output = last_hidden_state[:, 0, :]
+ pooled_output = self.post_layernorm(pooled_output)
+
+ if not return_dict:
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+ def get_input_embeddings(self):
+ return self.embeddings
+
+
+@add_start_docstrings(
+ """
+ This model is going to be deprecated in future versions. Please use `BlipForConditionalGeneration`, `BlipForQuestionAnswering` or `BlipForImageTextRetrieval` depending on your use case.
+ """,
+ BLIP_START_DOCSTRING,
+)
+class BlipModel(BlipPreTrainedModel):
+ config_class = BlipConfig
+
+ def __init__(self, config: BlipConfig):
+ super().__init__(config)
+
+ if not isinstance(config.text_config, BlipTextConfig):
+ raise TypeError(
+ "config.text_config is expected to be of type BlipTextConfig but is of type"
+ f" {type(config.text_config)}."
+ )
+
+ if not isinstance(config.vision_config, BlipVisionConfig):
+ raise TypeError(
+ "config.vision_config is expected to be of type BlipVisionConfig but is of type"
+ f" {type(config.vision_config)}."
+ )
+
+ text_config = config.text_config
+ vision_config = config.vision_config
+
+ self.projection_dim = config.projection_dim
+ self.text_embed_dim = text_config.hidden_size
+ self.vision_embed_dim = vision_config.hidden_size
+
+ self.text_model = BlipTextModel(text_config)
+ self.vision_model = BlipVisionModel(vision_config)
+
+ self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
+ self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
+ self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
+
+ logger.warning(
+ "`BlipModel` is going to be deprecated in future release, please use `BlipForConditionalGeneration`, `BlipForQuestionAnswering` or `BlipForImageTextRetrieval` depending on your usecase."
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.text_model.get_input_embeddings()
+
+ def set_input_embeddings(self, value):
+ self.text_model.set_input_embeddings(value)
+
+ @add_start_docstrings_to_model_forward(BLIP_TEXT_INPUTS_DOCSTRING)
+ def get_text_features(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ return_dict: Optional[bool] = None,
+ ) -> torch.FloatTensor:
+ r"""
+ Returns:
+ text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
+ applying the projection layer to the pooled output of [`BlipTextModel`].
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoProcessor, BlipModel
+
+ >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+
+ >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
+ >>> text_features = model.get_text_features(**inputs)
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ text_outputs = self.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ return_dict=return_dict,
+ )
+
+ pooled_output = text_outputs[1]
+ text_features = self.text_projection(pooled_output)
+
+ return text_features
+
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
+ def get_image_features(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ return_dict: Optional[bool] = None,
+ interpolate_pos_encoding: bool = False,
+ ) -> torch.FloatTensor:
+ r"""
+ Returns:
+ image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
+ applying the projection layer to the pooled output of [`BlipVisionModel`].
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, BlipModel
+
+ >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(images=image, return_tensors="pt")
+
+ >>> image_features = model.get_image_features(**inputs)
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ return_dict=return_dict,
+ interpolate_pos_encoding=interpolate_pos_encoding,
+ )
+
+ pooled_output = vision_outputs[1] # pooled_output
+ image_features = self.visual_projection(pooled_output)
+
+ return image_features
+
+ @add_start_docstrings_to_model_forward(BLIP_INPUTS_DOCSTRING)
+ def get_multimodal_features(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ return_dict: Optional[bool] = None,
+ interpolate_pos_encoding: bool = False,
+ ) -> torch.FloatTensor:
+ r"""
+ Returns:
+ multimodal_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The multimodal embeddings
+ obtained by applying the image embeddings to the text encoder using the cross-attention mechanism.
+
+ Examples:
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, BlipModel
+
+ >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+ >>> texts = ["a photo of a cat", "a photo of a dog"]
+ >>> inputs = processor(images=image, text=texts, padding=True, return_tensors="pt")
+
+ >>> multimodal_features = model.get_multimodal_features(**inputs)
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=True,
+ output_hidden_states=True,
+ return_dict=return_dict,
+ interpolate_pos_encoding=interpolate_pos_encoding,
+ )
+
+ image_embeds = vision_outputs[0]
+ image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long)
+
+ text_outputs = self.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ encoder_hidden_states=image_embeds,
+ encoder_attention_mask=image_atts,
+ return_dict=return_dict,
+ )
+
+ pooled_output = text_outputs[1] # pooled_output
+ multimodal_features = self.text_projection(pooled_output)
+
+ return multimodal_features
+
+ @add_start_docstrings_to_model_forward(BLIP_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BlipOutput, config_class=BlipConfig)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ return_loss: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ interpolate_pos_encoding: bool = False,
+ ) -> Union[Tuple, BlipOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, BlipModel
+
+ >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(
+ ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
+ ... )
+
+ >>> outputs = model(**inputs)
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
+ >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
+ ```"""
+ # Use BLIP model's config for some fields (if specified) instead of those of vision & text components.
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ interpolate_pos_encoding=interpolate_pos_encoding,
+ )
+
+ text_outputs = self.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ image_embeds = vision_outputs[1]
+ image_embeds = self.visual_projection(image_embeds)
+
+ text_embeds = text_outputs[1]
+ text_embeds = self.text_projection(text_embeds)
+
+ # normalized features
+ image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
+ text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
+
+ # cosine similarity as logits
+ logit_scale = self.logit_scale.exp().to(device=text_embeds.device)
+ image_embeds = image_embeds.to(device=text_embeds.device, dtype=text_embeds.dtype)
+ logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
+ logits_per_image = logits_per_text.t()
+
+ loss = None
+ if return_loss:
+ loss = blip_loss(logits_per_text)
+
+ if not return_dict:
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
+ return ((loss,) + output) if loss is not None else output
+
+ return BlipOutput(
+ loss=loss,
+ logits_per_image=logits_per_image,
+ logits_per_text=logits_per_text,
+ text_embeds=text_embeds,
+ image_embeds=image_embeds,
+ text_model_output=text_outputs,
+ vision_model_output=vision_outputs,
+ )
+
+
+@add_start_docstrings(
+ """
+ BLIP Model for image captioning. The model consists of a vision encoder and a text decoder. One can optionally pass
+ `input_ids` to the model, which serve as a text prompt, to make the text decoder continue the prompt; the caption is
+ then generated as a continuation of that text input. If no text input is provided, the decoder starts generating
+ from the [BOS] (beginning-of-sequence) token only.
+ """,
+ BLIP_START_DOCSTRING,
+)
+class BlipForConditionalGeneration(BlipPreTrainedModel, GenerationMixin):
+ config_class = BlipConfig
+ _tied_weights_keys = ["text_decoder.cls.predictions.decoder.bias"]
+ main_input_name = "pixel_values"
+
+ def __init__(self, config: BlipConfig):
+ super().__init__(config)
+
+ self.vision_model = BlipVisionModel(config.vision_config)
+
+ self.text_decoder = BlipTextLMHeadModel(config.text_config)
+
+ self.decoder_input_ids = config.text_config.bos_token_id
+ self.decoder_pad_token_id = config.text_config.pad_token_id
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.text_decoder.get_input_embeddings()
+
+ def set_input_embeddings(self, value):
+ self.text_decoder.set_input_embeddings(value)
+
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BlipForConditionalGenerationModelOutput, config_class=BlipVisionConfig)
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ labels: Optional[torch.LongTensor] = None,
+ return_dict: Optional[bool] = None,
+ interpolate_pos_encoding: bool = False,
+ ) -> Union[Tuple, BlipForConditionalGenerationModelOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, BlipForConditionalGeneration
+
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+ >>> model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+ >>> text = "A picture of"
+
+ >>> inputs = processor(images=image, text=text, return_tensors="pt")
+
+ >>> outputs = model(**inputs)
+ ```"""
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ interpolate_pos_encoding=interpolate_pos_encoding,
+ )
+
+ image_embeds = vision_outputs[0]
+
+ outputs = self.text_decoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ encoder_hidden_states=image_embeds,
+ labels=labels,
+ return_dict=return_dict,
+ reduction="mean",
+ )
+
+ if not return_dict:
+ outputs = (outputs[0], outputs[1]) if labels is not None else (outputs[0],)
+ outputs += (image_embeds, vision_outputs[0]) + vision_outputs[2:]
+ return tuple(output for output in outputs if output is not None)
+
+ return BlipForConditionalGenerationModelOutput(
+ loss=outputs.loss,
+ logits=outputs.logits,
+ image_embeds=image_embeds,
+ last_hidden_state=vision_outputs.last_hidden_state,
+ hidden_states=vision_outputs.hidden_states,
+ attentions=vision_outputs.attentions,
+ )
+
+ @torch.no_grad()
+ def generate(
+ self,
+ pixel_values: torch.FloatTensor,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ interpolate_pos_encoding: bool = False,
+ **generate_kwargs,
+ ) -> torch.LongTensor:
+ r"""
+ Overrides the *generate* function so the model can be used as a conditional generator.
+
+ Parameters:
+ pixel_values (*torch.FloatTensor* of shape *(batch_size, num_channels, image_height, image_width)*):
+ Input image to be processed.
+ input_ids (*torch.LongTensor* of shape *(batch_size, sequence_length)*, *optional*):
+ The sequence used as a prompt for the generation.
+ attention_mask (*torch.LongTensor* of shape *(batch_size, sequence_length)*, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`. `1` for
+ tokens that are NOT MASKED, `0` for MASKED tokens.
+
+
+ Examples:
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, BlipForConditionalGeneration
+
+ >>> model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(images=image, return_tensors="pt")
+
+ >>> outputs = model.generate(**inputs)
+ >>> print(processor.decode(outputs[0], skip_special_tokens=True))
+ two cats sleeping on a couch
+ ```
+ """
+
+ batch_size = pixel_values.shape[0]
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ interpolate_pos_encoding=interpolate_pos_encoding,
+ )
+
+ image_embeds = vision_outputs[0]
+
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image_embeds.device)
+
+ if isinstance(input_ids, list):
+ input_ids = torch.LongTensor(input_ids)
+ elif input_ids is None:
+ input_ids = (
+ torch.LongTensor([[self.decoder_input_ids, self.config.text_config.eos_token_id]])
+ .repeat(batch_size, 1)
+ .to(image_embeds.device)
+ )
+
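+ # The first token is forced to [BOS] and the final prompt token (and its mask position) is dropped
+ # before calling `generate`, so the decoder continues from the prompt rather than from its last token.
+ # With the default prompt [[BOS, EOS]] built above, this leaves a single [BOS] token per sample.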
+ input_ids[:, 0] = self.config.text_config.bos_token_id
+ attention_mask = attention_mask[:, :-1] if attention_mask is not None else None
+
+ outputs = self.text_decoder.generate(
+ input_ids=input_ids[:, :-1],
+ eos_token_id=self.config.text_config.sep_token_id,
+ pad_token_id=self.config.text_config.pad_token_id,
+ attention_mask=attention_mask,
+ encoder_hidden_states=image_embeds,
+ encoder_attention_mask=image_attention_mask,
+ **generate_kwargs,
+ )
+
+ return outputs
+
+
+@add_start_docstrings(
+ """
+ BLIP Model for visual question answering. The model consists of a vision encoder, a text encoder as well as a text
+ decoder. The vision encoder will encode the input image, the text encoder will encode the input question together
+ with the encoding of the image, and the text decoder will output the answer to the question.
+ """,
+ BLIP_START_DOCSTRING,
+)
+class BlipForQuestionAnswering(BlipPreTrainedModel):
+ config_class = BlipConfig
+ _tied_weights_keys = ["text_decoder.cls.predictions.decoder.bias"]
+
+ def __init__(self, config: BlipConfig):
+ super().__init__(config)
+
+ self.vision_model = BlipVisionModel(config.vision_config)
+
+ self.text_encoder = BlipTextModel(config.text_config, add_pooling_layer=False)
+
+ self.text_decoder = BlipTextLMHeadModel(config.text_config)
+
+ self.decoder_pad_token_id = config.text_config.pad_token_id
+ self.decoder_start_token_id = config.text_config.bos_token_id
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def set_input_embeddings(self, value):
+ self.text_encoder.set_input_embeddings(value)
+
+ def get_input_embeddings(self):
+ # Returns the shared embeddings if they are shared, otherwise the embeddings specific to the text encoder.
+ return self.text_encoder.get_input_embeddings()
+
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BlipTextVisionModelOutput, config_class=BlipVisionConfig)
+ def forward(
+ self,
+ input_ids: torch.LongTensor,
+ pixel_values: torch.FloatTensor,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ labels: Optional[torch.LongTensor] = None,
+ return_dict: Optional[bool] = None,
+ interpolate_pos_encoding: bool = False,
+ ) -> Union[Tuple, BlipTextVisionModelOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, BlipForQuestionAnswering
+
+ >>> model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> # training
+ >>> text = "How many cats are in the picture?"
+ >>> label = "2"
+ >>> inputs = processor(images=image, text=text, return_tensors="pt")
+ >>> labels = processor(text=label, return_tensors="pt").input_ids
+
+ >>> inputs["labels"] = labels
+ >>> outputs = model(**inputs)
+ >>> loss = outputs.loss
+ >>> loss.backward()
+
+ >>> # inference
+ >>> text = "How many cats are in the picture?"
+ >>> inputs = processor(images=image, text=text, return_tensors="pt")
+ >>> outputs = model.generate(**inputs)
+ >>> print(processor.decode(outputs[0], skip_special_tokens=True))
+ 2
+ ```"""
+ if labels is None and decoder_input_ids is None:
+ raise ValueError(
+ "Either `decoder_input_ids` or `labels` should be passed when calling `forward` with"
+ " `BlipForQuestionAnswering`. if you are training the model make sure that `labels` is passed, if you"
+ " are using the model for inference make sure that `decoder_input_ids` is passed or call `generate`"
+ )
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ interpolate_pos_encoding=interpolate_pos_encoding,
+ )
+
+ image_embeds = vision_outputs[0]
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long)
+
+ question_embeds = self.text_encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ encoder_hidden_states=image_embeds,
+ encoder_attention_mask=image_attention_mask,
+ return_dict=return_dict,
+ )
+
+ if labels is not None and decoder_input_ids is None:
+ # labels are already shifted right, see: https://github.com/huggingface/transformers/pull/23153
+ decoder_input_ids = labels
+
+ question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state
+
+ answer_output = self.text_decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=question_embeds,
+ encoder_attention_mask=attention_mask,
+ labels=labels,
+ return_dict=return_dict,
+ reduction="mean",
+ )
+
+ if labels is not None:
+ decoder_loss = answer_output.loss.mean() if return_dict else answer_output[0].mean()
+ else:
+ decoder_loss = None
+
+ if not return_dict:
+ outputs = (decoder_loss, image_embeds, vision_outputs[0]) + vision_outputs[2:]
+ return tuple(output for output in outputs if output is not None)
+
+ return BlipTextVisionModelOutput(
+ loss=decoder_loss,
+ image_embeds=image_embeds,
+ last_hidden_state=vision_outputs.last_hidden_state,
+ hidden_states=vision_outputs.hidden_states,
+ attentions=vision_outputs.attentions,
+ )
+
+ @torch.no_grad()
+ def generate(
+ self,
+ input_ids: torch.LongTensor,
+ pixel_values: torch.FloatTensor,
+ attention_mask: Optional[torch.LongTensor] = None,
+ interpolate_pos_encoding: bool = False,
+ **generate_kwargs,
+ ) -> torch.LongTensor:
+ r"""
+ Overrides the *generate* function so the model can be used as a conditional generator.
+
+ Parameters:
+ input_ids (*torch.LongTensor* of shape *(batch_size, sequence_length)*):
+ The sequence used as a prompt for the generation.
+ pixel_values (*torch.FloatTensor* of shape *(batch_size, num_channels, image_height, image_width)*):
+ Input image to be processed.
+ attention_mask (*torch.LongTensor* of shape *(batch_size, sequence_length)*, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`. `1` for
+ tokens that are NOT MASKED, `0` for MASKED tokens.
+ **generate_kwargs:
+ Additional arguments passed to the *generate* function of the decoder
+
+
+ Examples:
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, BlipForQuestionAnswering
+
+ >>> model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+ >>> text = "How many cats are in the picture?"
+
+ >>> inputs = processor(images=image, text=text, return_tensors="pt")
+
+ >>> outputs = model.generate(**inputs)
+ >>> print(processor.decode(outputs[0], skip_special_tokens=True))
+ 2
+ ```
+ """
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ interpolate_pos_encoding=interpolate_pos_encoding,
+ )
+
+ image_embeds = vision_outputs[0]
+
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image_embeds.device)
+
+ if isinstance(input_ids, list):
+ input_ids = torch.LongTensor(input_ids)
+
+ question_outputs = self.text_encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ encoder_hidden_states=image_embeds,
+ encoder_attention_mask=image_attention_mask,
+ return_dict=False,
+ )
+
+ question_embeds = question_outputs[0]
+
+ question_attention_mask = torch.ones(question_embeds.size()[:-1], dtype=torch.long).to(question_embeds.device)
+
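+ # Seed the answer decoder with a single start ([BOS]-like) token per question; the decoder then
+ # cross-attends to the encoded question, which itself cross-attended to the image features above.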
+ bos_ids = torch.full(
+ (question_embeds.size(0), 1), fill_value=self.decoder_start_token_id, device=question_embeds.device
+ )
+
+ outputs = self.text_decoder.generate(
+ input_ids=bos_ids,
+ eos_token_id=self.config.text_config.sep_token_id,
+ pad_token_id=self.config.text_config.pad_token_id,
+ encoder_hidden_states=question_embeds,
+ encoder_attention_mask=question_attention_mask,
+ **generate_kwargs,
+ )
+
+ return outputs
+
+
+@add_start_docstrings(
+ """
+ BLIP Model with a vision and text projector, and a classification head on top. The model is used in the context of
+ image-text retrieval. Given an image and a text, the model returns the probability of the text being relevant to
+ the image.
+ """,
+ BLIP_START_DOCSTRING,
+)
+class BlipForImageTextRetrieval(BlipPreTrainedModel):
+ config_class = BlipConfig
+
+ def __init__(self, config: BlipConfig):
+ super().__init__(config)
+
+ self.vision_model = BlipVisionModel(config.vision_config)
+
+ self.text_encoder = BlipTextModel(config.text_config, add_pooling_layer=False)
+
+ # vision projection layer
+ self.vision_proj = nn.Linear(config.vision_config.hidden_size, config.image_text_hidden_size)
+
+ # text projection layer
+ self.text_proj = nn.Linear(config.text_config.hidden_size, config.image_text_hidden_size)
+
+ # image text matching head
+ self.itm_head = nn.Linear(config.text_config.hidden_size, 2)
+
+ self.decoder_pad_token_id = (
+ config.text_config.pad_token_id
+ if not hasattr(config, "decoder_pad_token_id")
+ else config.decoder_pad_token_id
+ )
+ self.decoder_start_token_id = (
+ config.text_config.bos_token_id
+ if not hasattr(config, "decoder_start_token_id")
+ else config.decoder_start_token_id
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.text_encoder.get_input_embeddings()
+
+ def set_input_embeddings(self, value):
+ self.text_encoder.set_input_embeddings(value)
+
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BlipTextVisionModelOutput, config_class=BlipVisionConfig)
+ def forward(
+ self,
+ input_ids: torch.LongTensor,
+ pixel_values: torch.FloatTensor,
+ use_itm_head: Optional[bool] = True,
+ attention_mask: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ interpolate_pos_encoding: bool = False,
+ ) -> Union[Tuple, BlipTextVisionModelOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, BlipForImageTextRetrieval
+
+ >>> model = BlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco")
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-itm-base-coco")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+ >>> text = "an image of a cat"
+
+ >>> inputs = processor(images=image, text=text, return_tensors="pt")
+ >>> outputs = model(**inputs)
+ ```
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ interpolate_pos_encoding=interpolate_pos_encoding,
+ )
+
+ image_embeds = vision_outputs[0]
+ image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long)
+
+ if use_itm_head:
+ question_embeds = self.text_encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ encoder_hidden_states=image_embeds,
+ encoder_attention_mask=image_atts,
+ return_dict=return_dict,
+ )
+ question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state
+
+ output = self.itm_head(question_embeds[:, 0, :])
+ else:
+ question_embeds = self.text_encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ return_dict=return_dict,
+ )
+ question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state
+
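+ # Contrastive (ITC) path: the text is encoded without image cross-attention, the image and text [CLS]
+ # states are projected and L2-normalized, and image-text pairs are scored by cosine similarity instead
+ # of the binary ITM classifier.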
+ image_feat = normalize(self.vision_proj(image_embeds[:, 0, :]), dim=-1)
+ text_feat = normalize(self.text_proj(question_embeds[:, 0, :]), dim=-1)
+
+ output = image_feat @ text_feat.t()
+
+ if not return_dict:
+ outputs = (output, vision_outputs[0]) + vision_outputs[2:] + (question_embeds,)
+ return tuple(output for output in outputs if output is not None)
+
+ return BlipImageTextMatchingModelOutput(
+ itm_score=output,
+ last_hidden_state=vision_outputs.last_hidden_state,
+ hidden_states=vision_outputs.hidden_states,
+ attentions=vision_outputs.attentions,
+ question_embeds=question_embeds,
+ )
+
+
+__all__ = [
+ "BlipModel",
+ "BlipPreTrainedModel",
+ "BlipForConditionalGeneration",
+ "BlipForQuestionAnswering",
+ "BlipVisionModel",
+ "BlipTextModel",
+ "BlipForImageTextRetrieval",
+]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/blip/modeling_tf_blip.py b/janus/lib/python3.10/site-packages/transformers/models/blip/modeling_tf_blip.py
new file mode 100644
index 0000000000000000000000000000000000000000..92f61bf470d93f53d3aac8b6071d94cacd885dea
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/blip/modeling_tf_blip.py
@@ -0,0 +1,1709 @@
+# coding=utf-8
+# Copyright 2023 The Salesforce Team Authors and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""TensorFlow BLIP model."""
+
+from __future__ import annotations
+
+import warnings
+from dataclasses import dataclass
+from typing import Any, Optional, Tuple, Union
+
+import tensorflow as tf
+
+from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling
+from ...modeling_tf_utils import (
+ TFPreTrainedModel,
+ get_initializer,
+ get_tf_activation,
+ keras,
+ keras_serializable,
+ shape_list,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, stable_softmax
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig
+from .modeling_tf_blip_text import BLIP_TEXT_INPUTS_DOCSTRING, TFBlipTextLMHeadModel, TFBlipTextModel
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "Salesforce/blip-vqa-base"
+
+
+# Copied from transformers.models.clip.modeling_tf_clip.contrastive_loss
+def contrastive_loss(logits: tf.Tensor) -> tf.Tensor:
+ return tf.math.reduce_mean(
+ keras.metrics.sparse_categorical_crossentropy(
+ y_true=tf.range(shape_list(logits)[0]), y_pred=logits, from_logits=True
+ )
+ )
+
+
+# Copied from transformers.models.clip.modeling_tf_clip.clip_loss with clip->blip
+def blip_loss(similarity: tf.Tensor) -> tf.Tensor:
+ caption_loss = contrastive_loss(similarity)
+ image_loss = contrastive_loss(tf.transpose(similarity))
+ return (caption_loss + image_loss) / 2.0
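+
+# Note: `contrastive_loss` treats the i-th caption as the positive match for the i-th image (the target
+# class for row i of the logits is i); `blip_loss` averages this cross-entropy over the text->image and
+# image->text directions, i.e. a symmetric, InfoNCE-style objective.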
+
+
+@dataclass
+class TFBlipForConditionalGenerationModelOutput(ModelOutput):
+ """
+ Adapted from the base class for vision model's outputs that also contains image embeddings of the pooling of the
+ last hidden states. This class also adds the loss term from the text decoder.
+
+ Args:
+ loss (`tf.Tensor`, *optional*, returned when `labels` is provided, `tf.Tensor` of shape `(1,)`):
+ Language modeling loss from the text decoder.
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`, *optional*):
+ Prediction scores of the language modeling head of the text decoder model.
+ image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`, *optional*):
+ The image embeddings obtained after applying the Vision Transformer model to the input image.
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
+ the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Tuple[tf.Tensor] | None = None
+ logits: Tuple[tf.Tensor] | None = None
+ image_embeds: tf.Tensor | None = None
+ last_hidden_state: tf.Tensor = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+
+ @property
+ def decoder_logits(self):
+ warnings.warn(
+ "`decoder_logits` attribute is deprecated and will be removed in version 5 of Transformers."
+ " Please use the `logits` attribute to retrieve the final output instead.",
+ FutureWarning,
+ )
+ return self.logits
+
+
+@dataclass
+class TFBlipTextVisionModelOutput(ModelOutput):
+ """
+ Adapted from the base class for vision model's outputs that also contains image embeddings of the pooling of the
+ last hidden states. This class also adds the loss term from the text decoder.
+
+ Args:
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss from the text decoder.
+ image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
+ The image embeddings obtained by applying the projection layer to the pooler_output.
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
+ the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: tf.Tensor | None = None
+ image_embeds: tf.Tensor | None = None
+ last_hidden_state: tf.Tensor = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+@dataclass
+class TFBlipImageTextMatchingModelOutput(ModelOutput):
+ """
+ Adapted from the base class for vision model outputs, which also contains the image embeddings obtained by
+ pooling the last hidden states. This class additionally adds the loss term from the text decoder as well as the
+ image-text similarity scores.
+
+ Args:
+ itm_score (`tf.Tensor`):
+ The image-text similarity scores.
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss from the text decoder.
+ image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
+ The image embeddings obtained by applying the projection layer to the pooler_output.
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, plus one for
+ the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ vision_pooler_output (`tf.Tensor` of shape `(batch_size, hidden_size)`, *optional*):
+ Last layer hidden-state of the vision-only branch of the model.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ question_embeds (`tf.Tensor`):
+ The question embeddings obtained by the text projection layer.
+ """
+
+ itm_score: tf.Tensor | None = None
+ loss: tf.Tensor | None = None
+ image_embeds: tf.Tensor | None = None
+ last_hidden_state: tf.Tensor = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ vision_pooler_output: tf.Tensor | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+ question_embeds: Tuple[tf.Tensor] | None = None
+
+
+@dataclass
+class TFBlipOutput(ModelOutput):
+ """
+ Args:
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
+ Contrastive loss for image-text similarity.
+ logits_per_image (`tf.Tensor` of shape `(image_batch_size, text_batch_size)`):
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
+ similarity scores.
+ logits_per_text (`tf.Tensor` of shape `(text_batch_size, image_batch_size)`):
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
+ similarity scores.
+ text_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`):
+ The text embeddings obtained by applying the projection layer to the pooled output of [`BlipTextModel`].
+ image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`):
+ The image embeddings obtained by applying the projection layer to the pooled output of [`BlipVisionModel`].
+ text_model_output (`BaseModelOutputWithPooling`):
+ The output of the [`BlipTextModel`].
+ vision_model_output (`BaseModelOutputWithPooling`):
+ The output of the [`BlipVisionModel`].
+ """
+
+ loss: tf.Tensor | None = None
+ logits_per_image: tf.Tensor = None
+ logits_per_text: tf.Tensor = None
+ text_embeds: tf.Tensor = None
+ image_embeds: tf.Tensor = None
+ text_model_output: TFBaseModelOutputWithPooling = None
+ vision_model_output: TFBaseModelOutputWithPooling = None
+
+ def to_tuple(self) -> Tuple[Any]:
+ return tuple(
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
+ for k in self.keys()
+ )
+
+
+class TFBlipVisionEmbeddings(keras.layers.Layer):
+ def __init__(self, config: BlipVisionConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.image_size = config.image_size
+ self.patch_size = config.patch_size
+
+ self.patch_embedding = keras.layers.Conv2D(
+ filters=self.embed_dim,
+ kernel_size=self.patch_size,
+ strides=self.patch_size,
+ kernel_initializer=get_initializer(self.config.initializer_range),
+ data_format="channels_last",
+ name="patch_embedding",
+ )
+
+ self.num_patches = (self.image_size // self.patch_size) ** 2
+ self.num_positions = self.num_patches + 1
+
+ def build(self, input_shape=None):
+ self.class_embedding = self.add_weight(
+ shape=(1, 1, self.embed_dim),
+ initializer=get_initializer(self.config.initializer_range),
+ trainable=True,
+ name="class_embedding",
+ )
+
+ self.position_embedding = self.add_weight(
+ shape=(1, self.num_positions, self.embed_dim),
+ initializer=get_initializer(self.config.initializer_range),
+ trainable=True,
+ name="position_embedding",
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "patch_embedding", None) is not None:
+ with tf.name_scope(self.patch_embedding.name):
+ self.patch_embedding.build([None, None, None, 3])
+
+ def call(self, pixel_values: tf.Tensor) -> tf.Tensor:
+ # Input is channels-first, we transpose. PyTorch transposes after the conv because PyTorch
+ # likes channels-first convs.
+ batch_size = tf.shape(pixel_values)[0]
+ pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
+ patch_embeds = self.patch_embedding(pixel_values)
+ patch_embeds = tf.reshape(patch_embeds, (batch_size, self.num_patches, -1))
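+ # patch_embeds now has shape (batch_size, num_patches, embed_dim),
+ # where num_patches = (image_size // patch_size) ** 2.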
+
+ class_embeds = tf.broadcast_to(self.class_embedding, (batch_size, 1, self.embed_dim))
+ embeddings = tf.concat([class_embeds, patch_embeds], axis=1)
+ embeddings = embeddings + self.position_embedding[:, : tf.shape(embeddings)[1], :]
+ return embeddings
+
+
+# Copied from transformers.models.clip.modeling_tf_clip.TFCLIPTextEmbeddings with CLIP->Blip
+class TFBlipTextEmbeddings(keras.layers.Layer):
+ def __init__(self, config: BlipTextConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.embed_dim = config.hidden_size
+
+ self.config = config
+
+ def build(self, input_shape: tf.TensorShape = None):
+ with tf.name_scope("token_embedding"):
+ self.weight = self.add_weight(
+ shape=(self.config.vocab_size, self.embed_dim),
+ initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range),
+ trainable=True,
+ name="weight",
+ )
+
+ with tf.name_scope("position_embedding"):
+ self.position_embedding = self.add_weight(
+ shape=(self.config.max_position_embeddings, self.embed_dim),
+ initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range),
+ trainable=True,
+ name="embeddings",
+ )
+
+ super().build(input_shape)
+
+ def call(
+ self,
+ input_ids: tf.Tensor = None,
+ position_ids: tf.Tensor = None,
+ inputs_embeds: tf.Tensor = None,
+ ) -> tf.Tensor:
+ """
+ Applies embedding based on inputs tensor.
+
+ Returns:
+ final_embeddings (`tf.Tensor`): output embedding tensor.
+ """
+ if input_ids is None and inputs_embeds is None:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if inputs_embeds is None:
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
+
+ input_shape = shape_list(inputs_embeds)[:-1]
+
+ if position_ids is None:
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
+
+ position_embeds = tf.gather(params=self.position_embedding, indices=position_ids)
+ position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1))
+ final_embeddings = inputs_embeds + position_embeds
+
+ return final_embeddings
+
+
+class TFBlipAttention(keras.layers.Layer):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.embed_dim // self.num_heads
+ if self.head_dim * self.num_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
+ f" {self.num_heads})."
+ )
+ self.scale = self.head_dim**-0.5
+ self.dropout = keras.layers.Dropout(config.attention_dropout, name="dropout")
+
+ self.qkv = keras.layers.Dense(
+ 3 * self.embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="qkv"
+ )
+
+ self.projection = keras.layers.Dense(
+ self.embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="projection"
+ )
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ head_mask: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = False,
+ training: Optional[bool] = None,
+ ) -> Tuple[tf.Tensor, tf.Tensor | None, Tuple[tf.Tensor] | None]:
+ """Input shape: Batch x Time x Channel"""
+
+ bsz, tgt_len, embed_dim = shape_list(hidden_states)
+
+ mixed_qkv = self.qkv(hidden_states)
+ mixed_qkv = tf.reshape(mixed_qkv, (bsz, tgt_len, 3, self.num_heads, self.head_dim))
+ mixed_qkv = tf.transpose(mixed_qkv, perm=(2, 0, 3, 1, 4))
+
+ query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]
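+ # After the reshape/transpose above, each of query_states, key_states and value_states
+ # has shape (batch_size, num_heads, tgt_len, head_dim).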
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = query_states @ tf.transpose(key_states, (0, 1, 3, 2))
+
+ attention_scores = attention_scores * self.scale
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = stable_softmax(attention_scores, axis=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs, training=training)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = tf.transpose(attention_probs @ value_states, perm=(0, 2, 1, 3))
+
+ new_context_layer_shape = shape_list(context_layer)[:-2] + [self.embed_dim]
+ context_layer = tf.reshape(context_layer, new_context_layer_shape)
+
+ output = self.projection(context_layer)
+
+ outputs = (output, attention_probs) if output_attentions else (output, None)
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dropout", None) is not None:
+ with tf.name_scope(self.dropout.name):
+ self.dropout.build(None)
+ if getattr(self, "qkv", None) is not None:
+ with tf.name_scope(self.qkv.name):
+ self.qkv.build([None, None, self.embed_dim])
+ if getattr(self, "projection", None) is not None:
+ with tf.name_scope(self.projection.name):
+ self.projection.build([None, None, self.embed_dim])
+
+
+class TFBlipMLP(keras.layers.Layer):
+ def __init__(self, config: BlipConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.activation_fn = get_tf_activation(config.hidden_act)
+
+ in_proj_std = (config.hidden_size**-0.5) * ((2 * config.num_hidden_layers) ** -0.5)
+ fc_std = (2 * config.hidden_size) ** -0.5
+
+ self.fc1 = keras.layers.Dense(
+ units=config.intermediate_size, kernel_initializer=get_initializer(fc_std), name="fc1"
+ )
+ self.fc2 = keras.layers.Dense(
+ units=config.hidden_size, kernel_initializer=get_initializer(in_proj_std), name="fc2"
+ )
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.fc1(inputs=hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+ hidden_states = self.fc2(inputs=hidden_states)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "fc1", None) is not None:
+ with tf.name_scope(self.fc1.name):
+ self.fc1.build([None, None, self.config.hidden_size])
+ if getattr(self, "fc2", None) is not None:
+ with tf.name_scope(self.fc2.name):
+ self.fc2.build([None, None, self.config.intermediate_size])
+
+
+class TFBlipEncoderLayer(keras.layers.Layer):
+ def __init__(self, config: BlipConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.embed_dim = config.hidden_size
+ self.self_attn = TFBlipAttention(config, name="self_attn")
+ self.layer_norm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm1")
+ self.mlp = TFBlipMLP(config, name="mlp")
+ self.layer_norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm2")
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor,
+ output_attentions: Optional[bool] = False,
+ training: Optional[bool] = None,
+ ) -> Tuple[tf.Tensor]:
+ """
+ Args:
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`tf.Tensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+
+ hidden_states = self.layer_norm1(hidden_states)
+ hidden_states, attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ head_mask=attention_mask,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ hidden_states = hidden_states + residual
+ residual = hidden_states
+ hidden_states = self.layer_norm2(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+
+ hidden_states = hidden_states + residual
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self_attn", None) is not None:
+ with tf.name_scope(self.self_attn.name):
+ self.self_attn.build(None)
+ if getattr(self, "layer_norm1", None) is not None:
+ with tf.name_scope(self.layer_norm1.name):
+ self.layer_norm1.build([None, None, self.embed_dim])
+ if getattr(self, "mlp", None) is not None:
+ with tf.name_scope(self.mlp.name):
+ self.mlp.build(None)
+ if getattr(self, "layer_norm2", None) is not None:
+ with tf.name_scope(self.layer_norm2.name):
+ self.layer_norm2.build([None, None, self.embed_dim])
+
+
+class TFBlipPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = BlipConfig
+ base_model_prefix = "blip"
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
+
+
+BLIP_START_DOCSTRING = r"""
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`BlipConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+BLIP_VISION_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
+ [`BlipImageProcessor`]. See [`BlipImageProcessor.__call__`] for details.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+BLIP_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoProcessor`]. See [`BlipProcessor.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
+ [`BlipImageProcessor`]. See [`BlipImageProcessor.__call__`] for details.
+ return_loss (`bool`, *optional*):
+ Whether or not to return the contrastive loss.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@keras_serializable
+class TFBlipEncoder(keras.layers.Layer):
+ config_class = BlipConfig
+ """
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
+ [`BlipEncoderLayer`].
+
+ Args:
+ config (`BlipConfig`):
+ The corresponding vision configuration for the `BlipEncoder`.
+ """
+
+ def __init__(self, config: BlipConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.layers = [TFBlipEncoderLayer(config, name=f"layers_._{i}") for i in range(config.num_hidden_layers)]
+
+ @unpack_inputs
+ def call(
+ self,
+ inputs_embeds,
+ attention_mask: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = None,
+ ) -> Union[Tuple, TFBaseModelOutput]:
+ r"""
+ Args:
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Embedded representation of the inputs. Should be float, not int tokens.
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ hidden_states = inputs_embeds
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ output_attentions=output_attentions,
+ training=training,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "layers", None) is not None:
+ for layer in self.layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+class TFBlipVisionModel(TFBlipPreTrainedModel):
+ main_input_name = "pixel_values"
+ config_class = BlipVisionConfig
+
+ def __init__(self, config: BlipVisionConfig, *args, **kwargs):
+ super().__init__(config, *args, **kwargs)
+ self.config = config
+
+ self.embeddings = TFBlipVisionEmbeddings(config, name="embeddings")
+ self.encoder = TFBlipEncoder(config, name="encoder")
+ self.post_layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="post_layernorm")
+ self.embed_dim = config.hidden_size
+
+ def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOutputWithPooling:
+ hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
+ attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
+
+ return TFBaseModelOutputWithPooling(
+ last_hidden_state=output.last_hidden_state,
+ pooler_output=output.pooler_output,
+ hidden_states=hs,
+ attentions=attns,
+ )
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=BlipVisionConfig)
+ def call(
+ self,
+ pixel_values: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = None,
+ ) -> Union[Tuple, TFBaseModelOutputWithPooling]:
+ r"""
+ Returns:
+
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ hidden_states = self.embeddings(pixel_values)
+
+ encoder_outputs = self.encoder(
+ inputs_embeds=hidden_states,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ last_hidden_state = encoder_outputs[0]
+ last_hidden_state = self.post_layernorm(last_hidden_state)
+
+ pooled_output = last_hidden_state[:, 0, :]
+ # TF gets confused if we call the layer with inputs of different ranks, so insert a singleton dimension
+ pooled_output = self.post_layernorm(tf.expand_dims(pooled_output, 1))
+ pooled_output = tf.squeeze(pooled_output, 1)
+
+ if not return_dict:
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
+
+ return TFBaseModelOutputWithPooling(
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+ def get_input_embeddings(self):
+ return self.embeddings
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "post_layernorm", None) is not None:
+ with tf.name_scope(self.post_layernorm.name):
+ self.post_layernorm.build([None, None, self.embed_dim])
+
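+# A minimal usage sketch for TFBlipVisionModel (illustrative only; the checkpoint name is assumed to be one of the
+# BLIP checkpoints used in the examples elsewhere in this file, and `image` is a PIL image prepared as in those
+# examples):
+#
+#     from transformers import AutoProcessor, TFBlipVisionModel
+#
+#     processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+#     vision_model = TFBlipVisionModel.from_pretrained("Salesforce/blip-image-captioning-base")
+#     inputs = processor(images=image, return_tensors="tf")
+#     outputs = vision_model(pixel_values=inputs.pixel_values)
+#     pooled = outputs.pooler_output  # shape (batch_size, hidden_size)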
+
+class TFBlipMainLayer(keras.layers.Layer):
+ config_class = BlipConfig
+
+ def __init__(self, config: BlipConfig, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ if not isinstance(config.text_config, BlipTextConfig):
+ raise TypeError(
+ "config.text_config is expected to be of type BlipTextConfig but is of type"
+ f" {type(config.text_config)}."
+ )
+
+ if not isinstance(config.vision_config, BlipVisionConfig):
+ raise TypeError(
+ "config.vision_config is expected to be of type BlipVisionConfig but is of type"
+ f" {type(config.vision_config)}."
+ )
+
+ text_config = config.text_config
+ vision_config = config.vision_config
+
+ self.projection_dim = config.projection_dim
+ self.text_embed_dim = text_config.hidden_size
+ self.vision_embed_dim = vision_config.hidden_size
+
+ self.text_model = TFBlipTextModel(text_config, name="text_model")
+ self.vision_model = TFBlipVisionModel(vision_config, name="vision_model")
+
+ self.visual_projection = keras.layers.Dense(
+ self.projection_dim,
+ use_bias=False,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="visual_projection",
+ )
+ self.text_projection = keras.layers.Dense(
+ self.projection_dim,
+ use_bias=False,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="text_projection",
+ )
+
+ self.config = config
+
+ def build(self, input_shape=None):
+ self.logit_scale = self.add_weight(
+ name="logit_scale",
+ shape=[],
+ initializer=keras.initializers.Constant(self.config.logit_scale_init_value),
+ trainable=True,
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "text_model", None) is not None:
+ with tf.name_scope(self.text_model.name):
+ self.text_model.build(None)
+ if getattr(self, "vision_model", None) is not None:
+ with tf.name_scope(self.vision_model.name):
+ self.vision_model.build(None)
+ if getattr(self, "visual_projection", None) is not None:
+ with tf.name_scope(self.visual_projection.name):
+ self.visual_projection.build([None, None, self.vision_embed_dim])
+ if getattr(self, "text_projection", None) is not None:
+ with tf.name_scope(self.text_projection.name):
+ self.text_projection.build([None, None, self.text_embed_dim])
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids: tf.Tensor | None = None,
+ pixel_values: tf.Tensor | None = None,
+ attention_mask: tf.Tensor | None = None,
+ position_ids: tf.Tensor | None = None,
+ return_loss: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = None,
+ ) -> Union[Tuple, TFBlipOutput]:
+ # Use BLIP model's config for some fields (if specified) instead of those of vision & text components.
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ text_outputs = self.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ image_embeds = vision_outputs[1]
+ image_embeds = self.visual_projection(image_embeds)
+
+ text_embeds = text_outputs[1]
+ text_embeds = self.text_projection(text_embeds)
+
+ # normalized features
+ image_embeds = image_embeds / tf.norm(image_embeds, ord=2, axis=-1, keepdims=True)
+ text_embeds = text_embeds / tf.norm(text_embeds, ord=2, axis=-1, keepdims=True)
+
+ # cosine similarity as logits
+ logit_scale = tf.exp(self.logit_scale)
+ logits_per_text = tf.matmul(text_embeds, image_embeds, transpose_b=True) * logit_scale
+ logits_per_image = tf.transpose(logits_per_text)
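+ # logits_per_text has shape (text_batch_size, image_batch_size); logits_per_image is its transpose.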
+
+ loss = None
+ if return_loss:
+ loss = blip_loss(logits_per_text)
+ loss = tf.reshape(loss, (1,))
+
+ if not return_dict:
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
+ return ((loss,) + output) if loss is not None else output
+
+ return TFBlipOutput(
+ loss=loss,
+ logits_per_image=logits_per_image,
+ logits_per_text=logits_per_text,
+ text_embeds=text_embeds,
+ image_embeds=image_embeds,
+ text_model_output=text_outputs,
+ vision_model_output=vision_outputs,
+ )
+
+
+class TFBlipModel(TFBlipPreTrainedModel):
+ config_class = BlipConfig
+ _keys_to_ignore_on_load_missing = [r"text_decoder.cls.predictions.decoder.bias"]
+ main_input_name = "input_ids"
+
+ def __init__(self, config: BlipConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.blip = TFBlipMainLayer(config, name="blip")
+
+ def serving_output(self, output: TFBlipOutput) -> TFBlipOutput:
+ return TFBlipOutput(
+ logits_per_image=output.logits_per_image,
+ logits_per_text=output.logits_per_text,
+ text_embeds=output.text_embeds,
+ image_embeds=output.image_embeds,
+ )
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(BLIP_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFBlipOutput, config_class=BlipConfig)
+ def call(
+ self,
+ input_ids: tf.Tensor | None = None,
+ pixel_values: tf.Tensor | None = None,
+ attention_mask: tf.Tensor | None = None,
+ position_ids: tf.Tensor | None = None,
+ return_loss: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = None,
+ ) -> Union[Tuple, TFBlipOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, TFBlipModel
+
+ >>> model = TFBlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(
+ ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="tf", padding=True
+ ... )
+
+ >>> outputs = model(**inputs)
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
+ >>> probs = tf.nn.softmax(logits_per_image, axis=1) # we can take the softmax to get the label probabilities
+ ```"""
+ outputs = self.blip(
+ input_ids=input_ids,
+ pixel_values=pixel_values,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ return_loss=return_loss,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ return outputs
+
+ @add_start_docstrings_to_model_forward(BLIP_TEXT_INPUTS_DOCSTRING)
+ def get_text_features(
+ self,
+ input_ids: tf.Tensor | None = None,
+ attention_mask: tf.Tensor | None = None,
+ position_ids: tf.Tensor | None = None,
+ return_dict: Optional[bool] = None,
+ ) -> tf.Tensor:
+ r"""
+ Returns:
+ text_features (`tf.Tensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by applying
+ the projection layer to the pooled output of [`TFBlipTextModel`].
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoProcessor, TFBlipModel
+
+ >>> model = TFBlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+
+ >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf")
+ >>> text_features = model.get_text_features(**inputs)
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ text_outputs = self.blip.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ return_dict=return_dict,
+ )
+
+ pooled_output = text_outputs[1]
+ text_features = self.blip.text_projection(pooled_output)
+
+ return text_features
+
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
+ def get_image_features(
+ self,
+ pixel_values: tf.Tensor | None = None,
+ return_dict: Optional[bool] = None,
+ ) -> tf.Tensor:
+ r"""
+ Returns:
+ image_features (`tf.Tensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by applying
+ the projection layer to the pooled output of [`TFBlipVisionModel`].
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, TFBlipModel
+
+ >>> model = TFBlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(images=image, return_tensors="tf")
+
+ >>> image_features = model.get_image_features(**inputs)
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ vision_outputs = self.blip.vision_model(pixel_values=pixel_values, return_dict=return_dict)
+
+ pooled_output = vision_outputs[1] # pooled_output
+ image_features = self.blip.visual_projection(pooled_output)
+
+ return image_features
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "blip", None) is not None:
+ with tf.name_scope(self.blip.name):
+ self.blip.build(None)
+
+
+@add_start_docstrings(
+ """
+ BLIP Model for image captioning. The model consists of a vision encoder and a text decoder. One can optionally pass
+ `input_ids` to the model, which serve as a text prompt, to make the text decoder continue the prompt. If no text
+ input is provided, the decoder starts generating the caption from the [BOS] (beginning-of-sequence) token only.
+ """,
+ BLIP_START_DOCSTRING,
+)
+class TFBlipForConditionalGeneration(TFBlipPreTrainedModel):
+ config_class = BlipConfig
+ _keys_to_ignore_on_load_missing = [r"text_decoder.cls.predictions.decoder.bias"]
+ main_input_name = "pixel_values"
+
+ def __init__(self, config: BlipConfig, *args, **kwargs):
+ super().__init__(config, *args, **kwargs)
+
+ self.vision_model = TFBlipVisionModel(config.vision_config, name="vision_model")
+
+ self.text_decoder = TFBlipTextLMHeadModel(config.text_config, name="text_decoder")
+
+ self.decoder_input_ids = config.text_config.bos_token_id
+ self.decoder_pad_token_id = config.text_config.pad_token_id
+
+ def get_input_embeddings(self) -> keras.layers.Layer:
+ return self.vision_model.embeddings.patch_embedding
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFBlipForConditionalGenerationModelOutput, config_class=BlipConfig)
+ def call(
+ self,
+ pixel_values: tf.Tensor,
+ input_ids: tf.Tensor | None = None,
+ attention_mask: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ labels: tf.Tensor | None = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = None,
+ ) -> Union[Tuple, TFBlipForConditionalGenerationModelOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, TFBlipForConditionalGeneration
+
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+ >>> model = TFBlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+ >>> text = "A picture of"
+
+ >>> inputs = processor(images=image, text=text, return_tensors="tf")
+
+ >>> outputs = model(**inputs)
+ ```"""
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ image_embeds = vision_outputs[0]
+
+ outputs = self.text_decoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ encoder_hidden_states=image_embeds,
+ labels=labels,
+ return_dict=False,
+ training=training,
+ )
+
+ if not return_dict:
+ outputs = (outputs[0], outputs[1], image_embeds, vision_outputs[0]) + vision_outputs[2:]
+ return tuple(output for output in outputs if output is not None)
+
+ if labels is not None:
+ loss = outputs[0]
+ logits = outputs[1]
+ else:
+ loss = None
+ logits = outputs[0]
+
+ if loss is not None and loss.shape.rank == 0:
+ loss = tf.reshape(loss, (1,))
+
+ return TFBlipForConditionalGenerationModelOutput(
+ loss=loss,
+ logits=logits,
+ image_embeds=image_embeds,
+ last_hidden_state=vision_outputs.last_hidden_state,
+ hidden_states=vision_outputs.hidden_states,
+ attentions=vision_outputs.attentions,
+ )
+
+ def generate(
+ self,
+ pixel_values: tf.Tensor,
+ input_ids: tf.Tensor | None = None,
+ attention_mask: tf.Tensor | None = None,
+ **generate_kwargs,
+ ) -> tf.Tensor:
+ r"""
+ Overrides *generate* function to be able to use the model as a conditional generator
+
+ Parameters:
+ pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, image_height, image_width)`):
+ Input image to be processed.
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ The sequence used as a prompt for the generation.
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`. `1` for
+ tokens that are **not masked**, `0` for **masked** tokens.
+
+
+ Examples:
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, TFBlipForConditionalGeneration
+
+ >>> model = TFBlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(images=image, return_tensors="tf")
+
+ >>> outputs = model.generate(**inputs)
+ >>> print(processor.decode(outputs[0], skip_special_tokens=True))
+ two cats sleeping on a couch
+ ```
+ """
+
+ batch_size = pixel_values.shape[0]
+ vision_outputs = self.vision_model(pixel_values=pixel_values)
+
+ image_embeds = vision_outputs[0]
+
+ image_attention_mask = tf.ones(shape_list(image_embeds)[:-1], dtype=tf.int32)
+
+ if isinstance(input_ids, list):
+ input_ids = tf.convert_to_tensor(input_ids, dtype=tf.int32)
+ elif input_ids is None:
+ input_ids = tf.convert_to_tensor(
+ [[self.decoder_input_ids, self.config.text_config.eos_token_id]], dtype=tf.int32
+ )
+
+ input_ids = tf.tile(input_ids, (batch_size, 1))
+
+ # PyTorch: input_ids[:, 0] = self.config.text_config.bos_token_id
+ input_ids = tf.concat(
+ [tf.ones((batch_size, 1), dtype=tf.int32) * self.config.text_config.bos_token_id, input_ids[:, 1:]], axis=1
+ )
+ attention_mask = attention_mask[:, :-1] if attention_mask is not None else None
+
+ outputs = self.text_decoder.generate(
+ input_ids=input_ids[:, :-1],
+ eos_token_id=self.config.text_config.sep_token_id,
+ pad_token_id=self.config.text_config.pad_token_id,
+ attention_mask=attention_mask,
+ encoder_hidden_states=image_embeds,
+ encoder_attention_mask=image_attention_mask,
+ **generate_kwargs,
+ )
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "vision_model", None) is not None:
+ with tf.name_scope(self.vision_model.name):
+ self.vision_model.build(None)
+ if getattr(self, "text_decoder", None) is not None:
+ with tf.name_scope(self.text_decoder.name):
+ self.text_decoder.build(None)
+
+
+@add_start_docstrings(
+ """
+ BLIP Model for visual question answering. The model consists of a vision encoder, a text encoder as well as a text
+ decoder. The vision encoder will encode the input image, the text encoder will encode the input question together
+ with the encoding of the image, and the text decoder will output the answer to the question.
+ """,
+ BLIP_START_DOCSTRING,
+)
+class TFBlipForQuestionAnswering(TFBlipPreTrainedModel):
+ config_class = BlipConfig
+ _keys_to_ignore_on_load_missing = [r"text_decoder.cls.predictions.decoder.bias"]
+
+ def __init__(self, config: BlipConfig, *args, **kwargs):
+ super().__init__(config, *args, **kwargs)
+
+ self.vision_model = TFBlipVisionModel(config.vision_config, name="vision_model")
+
+ self.text_encoder = TFBlipTextModel(config.text_config, name="text_encoder", add_pooling_layer=False)
+
+ self.text_decoder = TFBlipTextLMHeadModel(config.text_config, name="text_decoder")
+
+ self.decoder_pad_token_id = config.text_config.pad_token_id
+ self.decoder_start_token_id = config.text_config.bos_token_id
+
+ def get_input_embeddings(self) -> keras.layers.Layer:
+ return self.vision_model.embeddings.patch_embedding
+
+ # Adapted from transformers.models.t5.modeling_tf_t5.TFT5PreTrainedModel._shift_right
+ def _shift_right(self, input_ids):
+ decoder_start_token_id = self.decoder_start_token_id
+ pad_token_id = self.decoder_pad_token_id
+
+ if decoder_start_token_id is None or pad_token_id is None:
+ raise ValueError("decoder_start_token_id and pad_token_id must be defined!")
+
+ start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id)
+ start_tokens = tf.cast(start_tokens, input_ids.dtype) # Ensure compatible dtypes for concatenation
+ shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
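+ # e.g. labels [[t1, t2, t3]] become decoder inputs [[bos, t1, t2]] (the last label is dropped).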
+
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids = tf.where(
+ shifted_input_ids == -100,
+ tf.cast(tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids.dtype),
+ shifted_input_ids,
+ )
+
+ # "Verify that `labels` has only positive values and -100"
+ tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=shifted_input_ids.dtype))
+
+ return shifted_input_ids
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFBlipTextVisionModelOutput, config_class=BlipVisionConfig)
+ def call(
+ self,
+ input_ids: tf.Tensor,
+ pixel_values: tf.Tensor | None = None,
+ decoder_input_ids: tf.Tensor | None = None,
+ decoder_attention_mask: tf.Tensor | None = None,
+ attention_mask: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ labels: tf.Tensor | None = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = None,
+ ) -> Union[Tuple, TFBlipTextVisionModelOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, TFBlipForQuestionAnswering
+
+ >>> model = TFBlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> # training
+ >>> text = "How many cats are in the picture?"
+ >>> label = "2"
+ >>> inputs = processor(images=image, text=text, return_tensors="tf")
+ >>> labels = processor(text=label, return_tensors="tf").input_ids
+
+ >>> inputs["labels"] = labels
+ >>> outputs = model(**inputs)
+ >>> loss = outputs.loss
+
+ >>> # inference
+ >>> text = "How many cats are in the picture?"
+ >>> inputs = processor(images=image, text=text, return_tensors="tf")
+ >>> outputs = model.generate(**inputs)
+ >>> print(processor.decode(outputs[0], skip_special_tokens=True))
+ 2
+ ```"""
+ if labels is None and decoder_input_ids is None:
+ raise ValueError(
+ "Either `decoder_input_ids` or `labels` should be passed when calling"
+ " `TFBlipForQuestionAnswering`. if you are training the model make sure that `labels` is passed, if you"
+ " are using the model for inference make sure that `decoder_input_ids` is passed or call `generate`"
+ )
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ image_embeds = vision_outputs[0]
+ image_attention_mask = tf.ones(shape_list(image_embeds)[:-1], dtype=tf.int64)
+
+ question_embeds = self.text_encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ encoder_hidden_states=image_embeds,
+ encoder_attention_mask=image_attention_mask,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state
+
+ if labels is not None and decoder_input_ids is None:
+ # labels are already shifted right, see: https://github.com/huggingface/transformers/pull/23153
+ decoder_input_ids = labels
+
+ answer_output = self.text_decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=question_embeds,
+ encoder_attention_mask=attention_mask,
+ labels=labels,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ if labels is not None:
+ decoder_loss = tf.reduce_mean(answer_output.loss) if return_dict else tf.reduce_mean(answer_output[0])
+ else:
+ decoder_loss = None
+
+ if not return_dict:
+ outputs = (decoder_loss, image_embeds, vision_outputs[0]) + vision_outputs[2:]
+ return tuple(output for output in outputs if output is not None)
+
+ return TFBlipTextVisionModelOutput(
+ loss=decoder_loss,
+ image_embeds=image_embeds,
+ last_hidden_state=vision_outputs.last_hidden_state,
+ hidden_states=vision_outputs.hidden_states,
+ attentions=vision_outputs.attentions,
+ )
+
+ def generate(
+ self,
+ input_ids: tf.Tensor,
+ pixel_values: tf.Tensor,
+ attention_mask: tf.Tensor | None = None,
+ **generate_kwargs,
+ ) -> tf.Tensor:
+ r"""
+ Overrides *generate* function to be able to use the model as a conditional generator
+
+ Parameters:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ The sequence used as a prompt for the generation.
+ pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, image_height, image_width)`):
+ Input image to be processed.
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`. `1` for
+ tokens that are NOT MASKED, `0` for MASKED tokens.
+ generate_kwargs (dict, *optional*):
+ Additional arguments passed to the `generate` function of the decoder
+
+
+ Examples:
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, TFBlipForQuestionAnswering
+
+ >>> model = TFBlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+ >>> text = "How many cats are in the picture?"
+
+ >>> inputs = processor(images=image, text=text, return_tensors="tf")
+
+ >>> outputs = model.generate(**inputs)
+ >>> print(processor.decode(outputs[0], skip_special_tokens=True))
+ 2
+ ```
+ """
+ vision_outputs = self.vision_model(pixel_values=pixel_values)
+
+ image_embeds = vision_outputs[0]
+
+ image_attention_mask = tf.ones(shape_list(image_embeds)[:-1], dtype=tf.int32)
+
+ if isinstance(input_ids, list):
+ input_ids = tf.convert_to_tensor(input_ids, dtype=tf.int32)  # tf.Tensor is not a constructor; convert the list explicitly
+
+ question_outputs = self.text_encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ encoder_hidden_states=image_embeds,
+ encoder_attention_mask=image_attention_mask,
+ return_dict=False,
+ )
+
+ question_embeds = question_outputs[0]
+
+ question_attention_mask = tf.ones(shape_list(question_embeds)[:-1], dtype=tf.int32)
+
+ bos_ids = tf.fill(
+ (tf.shape(question_embeds)[0], 1), value=tf.cast(self.decoder_start_token_id, input_ids.dtype)
+ )
+
+ outputs = self.text_decoder.generate(
+ input_ids=bos_ids,
+ eos_token_id=self.config.text_config.sep_token_id,
+ pad_token_id=self.config.text_config.pad_token_id,
+ encoder_hidden_states=question_embeds,
+ encoder_attention_mask=question_attention_mask,
+ **generate_kwargs,
+ )
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "vision_model", None) is not None:
+ with tf.name_scope(self.vision_model.name):
+ self.vision_model.build(None)
+ if getattr(self, "text_encoder", None) is not None:
+ with tf.name_scope(self.text_encoder.name):
+ self.text_encoder.build(None)
+ if getattr(self, "text_decoder", None) is not None:
+ with tf.name_scope(self.text_decoder.name):
+ self.text_decoder.build(None)
+
+
+@add_start_docstrings(
+ """
+ BLIP Model with a vision and text projector, and a classification head on top. The model is used in the context of
+ image-text retrieval. Given an image and a text, the model returns the probability of the text being relevant to
+ the image.
+ """,
+ BLIP_START_DOCSTRING,
+)
+class TFBlipForImageTextRetrieval(TFBlipPreTrainedModel):
+ config_class = BlipConfig
+
+ def __init__(self, config: BlipConfig, *args, **kwargs):
+ super().__init__(config, *args, **kwargs)
+
+ self.vision_model = TFBlipVisionModel(config.vision_config, name="vision_model")
+
+ self.text_encoder = TFBlipTextModel(config.text_config, name="text_encoder", add_pooling_layer=False)
+
+ # vision projection layer
+ self.vision_proj = keras.layers.Dense(
+ config.image_text_hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="vision_proj",
+ )
+
+ # text projection layer
+ self.text_proj = keras.layers.Dense(
+ config.image_text_hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="text_proj",
+ )
+
+ # image text matching head
+ self.itm_head = keras.layers.Dense(
+ 2, kernel_initializer=get_initializer(config.initializer_range), name="itm_head"
+ )
+
+ self.decoder_pad_token_id = (
+ config.text_config.pad_token_id
+ if not hasattr(config, "decoder_pad_token_id")
+ else config.decoder_pad_token_id
+ )
+ self.decoder_start_token_id = (
+ config.text_config.bos_token_id
+ if not hasattr(config, "decoder_start_token_id")
+ else config.decoder_start_token_id
+ )
+ self.config = config
+
+ def get_input_embeddings(self) -> keras.layers.Layer:
+ return self.vision_model.embeddings.patch_embedding
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFBlipImageTextMatchingModelOutput, config_class=BlipVisionConfig)
+ def call(
+ self,
+ input_ids: tf.Tensor,
+ pixel_values: tf.Tensor | None = None,
+ use_itm_head: Optional[bool] = True,
+ attention_mask: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = None,
+ ) -> Union[Tuple, TFBlipImageTextMatchingModelOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, TFBlipForImageTextRetrieval
+
+ >>> model = TFBlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco")
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-itm-base-coco")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+ >>> text = "an image of a cat"
+
+ >>> inputs = processor(images=image, text=text, return_tensors="tf")
+ >>> outputs = model(**inputs)
+ ```
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ image_embeds = vision_outputs[0]
+ image_atts = tf.ones(shape_list(image_embeds)[:-1], dtype=tf.int64)
+
+ # Matt: In PyTorch, only one path (itm/non-itm) is taken. However, in TensorFlow this can result in
+ # some layers not being built! To avoid this, we always call both paths, then use an if statement to select
+ # which output to pass to the final output. The unnecessary nodes will be pruned from the final graph, but
+ # not before the layers have all been built correctly.
+ itm_question_embeds = self.text_encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ encoder_hidden_states=image_embeds,
+ encoder_attention_mask=image_atts,
+ return_dict=return_dict,
+ training=training,
+ )
+ itm_question_embeds = itm_question_embeds[0] if not return_dict else itm_question_embeds.last_hidden_state
+
+ itm_output = self.itm_head(itm_question_embeds[:, 0, :])
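+ # itm_output: (batch_size, 2) image-text matching logits computed from the first ([CLS]) token.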
+
+ no_itm_question_embeds = self.text_encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ return_dict=return_dict,
+ training=training,
+ )
+ no_itm_question_embeds = (
+ no_itm_question_embeds[0] if not return_dict else no_itm_question_embeds.last_hidden_state
+ )
+
+ image_feat, _ = tf.linalg.normalize(self.vision_proj(image_embeds[:, 0, :]), ord=2, axis=-1)
+ text_feat, _ = tf.linalg.normalize(self.text_proj(no_itm_question_embeds[:, 0, :]), ord=2, axis=-1)
+
+ no_itm_output = tf.matmul(image_feat, text_feat, transpose_b=True)
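+ # no_itm_output: (batch_size, batch_size) cosine similarities between the L2-normalized image and text projections.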
+
+ if use_itm_head:
+ output = itm_output
+ question_embeds = itm_question_embeds
+ else:
+ output = no_itm_output
+ question_embeds = no_itm_question_embeds
+
+ if not return_dict:
+ outputs = (output, vision_outputs[0]) + vision_outputs[2:] + (question_embeds,)
+ return tuple(output for output in outputs if output is not None)
+
+ return TFBlipImageTextMatchingModelOutput(
+ itm_score=output,
+ last_hidden_state=vision_outputs.last_hidden_state,
+ hidden_states=vision_outputs.hidden_states,
+ attentions=vision_outputs.attentions,
+ question_embeds=question_embeds,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "vision_model", None) is not None:
+ with tf.name_scope(self.vision_model.name):
+ self.vision_model.build(None)
+ if getattr(self, "text_encoder", None) is not None:
+ with tf.name_scope(self.text_encoder.name):
+ self.text_encoder.build(None)
+ if getattr(self, "vision_proj", None) is not None:
+ with tf.name_scope(self.vision_proj.name):
+ self.vision_proj.build([None, None, self.config.vision_config.hidden_size])
+ if getattr(self, "text_proj", None) is not None:
+ with tf.name_scope(self.text_proj.name):
+ self.text_proj.build([None, None, self.config.text_config.hidden_size])
+ if getattr(self, "itm_head", None) is not None:
+ with tf.name_scope(self.itm_head.name):
+ self.itm_head.build([None, None, self.config.text_config.hidden_size])
+
+
+__all__ = [
+ "TFBlipModel",
+ "TFBlipPreTrainedModel",
+ "TFBlipForConditionalGeneration",
+ "TFBlipForQuestionAnswering",
+ "TFBlipVisionModel",
+ "TFBlipTextModel",
+ "TFBlipForImageTextRetrieval",
+]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/decision_transformer/__init__.py b/janus/lib/python3.10/site-packages/transformers/models/decision_transformer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..455f7ffec5dee0567039cfd844588cb991c15622
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/decision_transformer/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+ from .configuration_decision_transformer import *
+ from .modeling_decision_transformer import *
+else:
+ import sys
+
+ _file = globals()["__file__"]
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
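The `__init__.py` above defers the actual submodule imports until an attribute is first accessed, via `_LazyModule`. As a rough, self-contained sketch of the same idea (using only the standard library, not the real `_LazyModule` implementation):

```python
import importlib
import types


class ToyLazyModule(types.ModuleType):
    """Toy stand-in for the lazy-import pattern: submodules load on first access."""

    def __init__(self, name, submodule_names):
        super().__init__(name)
        self._submodule_names = set(submodule_names)

    def __getattr__(self, item):
        if item in self._submodule_names:
            # Import lazily and cache the result on the module object.
            module = importlib.import_module(f"{self.__name__}.{item}")
            setattr(self, item, module)
            return module
        raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")
```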
diff --git a/janus/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/modeling_decision_transformer.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/modeling_decision_transformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8e7ba76c611fc24f8e321342fe68e8f0343a48f6
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/modeling_decision_transformer.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/decision_transformer/modeling_decision_transformer.py b/janus/lib/python3.10/site-packages/transformers/models/decision_transformer/modeling_decision_transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..08c0f918c43578dd43405e9d68eb807cab0d8144
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/decision_transformer/modeling_decision_transformer.py
@@ -0,0 +1,963 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch DecisionTransformer model."""
+
+import math
+import os
+from dataclasses import dataclass
+from typing import Callable, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions
+from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
+from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_decision_transformer import DecisionTransformerConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "edbeeching/decision-transformer-gym-hopper-medium"
+_CONFIG_FOR_DOC = "DecisionTransformerConfig"
+
+
+# Copied from transformers.models.gpt2.modeling_gpt2.load_tf_weights_in_gpt2
+def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
+ """Load tf checkpoints in a pytorch model"""
+ try:
+ import re
+
+ import tensorflow as tf
+ except ImportError:
+ logger.error(
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
+ "https://www.tensorflow.org/install/ for installation instructions."
+ )
+ raise
+ tf_path = os.path.abspath(gpt2_checkpoint_path)
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
+ # Load weights from TF model
+ init_vars = tf.train.list_variables(tf_path)
+ names = []
+ arrays = []
+ for name, shape in init_vars:
+ logger.info(f"Loading TF weight {name} with shape {shape}")
+ array = tf.train.load_variable(tf_path, name)
+ names.append(name)
+ arrays.append(array.squeeze())
+
+ for name, array in zip(names, arrays):
+ name = name[6:] # skip "model/"
+ name = name.split("/")
+ pointer = model
+ for m_name in name:
+ if re.fullmatch(r"[A-Za-z]+\d+", m_name):
+ scope_names = re.split(r"(\d+)", m_name)
+ else:
+ scope_names = [m_name]
+ if scope_names[0] == "w" or scope_names[0] == "g":
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] == "b":
+ pointer = getattr(pointer, "bias")
+ elif scope_names[0] == "wpe" or scope_names[0] == "wte":
+ pointer = getattr(pointer, scope_names[0])
+ pointer = getattr(pointer, "weight")
+ else:
+ pointer = getattr(pointer, scope_names[0])
+ if len(scope_names) >= 2:
+ num = int(scope_names[1])
+ pointer = pointer[num]
+ try:
+ if pointer.shape != array.shape:
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
+ except ValueError as e:
+ e.args += (pointer.shape, array.shape)
+ raise
+ logger.info(f"Initialize PyTorch weight {name}")
+ pointer.data = torch.from_numpy(array)
+ return model
+
+
+# Copied from transformers.models.gpt2.modeling_gpt2.eager_attention_forward
+def eager_attention_forward(module, query, key, value, attention_mask, head_mask=None, **kwargs):
+ attn_weights = torch.matmul(query, key.transpose(-1, -2))
+
+ if module.scale_attn_weights:
+ attn_weights = attn_weights / torch.full(
+ [], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device
+ )
+
+ # Layer-wise attention scaling
+ if module.scale_attn_by_inverse_layer_idx:
+ attn_weights = attn_weights / float(module.layer_idx + 1)
+
+ if not module.is_cross_attention:
+ # if only "normal" attention layer implements causal mask
+ query_length, key_length = query.size(-2), key.size(-2)
+ causal_mask = module.bias[:, :, key_length - query_length : key_length, :key_length]
+ mask_value = torch.finfo(attn_weights.dtype).min
+ # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
+ mask_value = torch.full([], mask_value, dtype=attn_weights.dtype, device=attn_weights.device)
+ attn_weights = torch.where(causal_mask, attn_weights.to(attn_weights.dtype), mask_value)
+
+ if attention_mask is not None:
+ # Apply the attention mask
+ attn_weights = attn_weights + attention_mask
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
+ attn_weights = attn_weights.type(value.dtype)
+ attn_weights = module.attn_dropout(attn_weights)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attn_weights = attn_weights * head_mask
+
+ attn_output = torch.matmul(attn_weights, value)
+ attn_output = attn_output.transpose(1, 2)
+
+ return attn_output, attn_weights
+
+
+# Copied from transformers.models.gpt2.modeling_gpt2.GPT2Attention with GPT2->DecisionTransformerGPT2
+class DecisionTransformerGPT2Attention(nn.Module):
+ def __init__(self, config, is_cross_attention=False, layer_idx=None):
+ super().__init__()
+ self.config = config
+ max_positions = config.max_position_embeddings
+ self.register_buffer(
+ "bias",
+ torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
+ 1, 1, max_positions, max_positions
+ ),
+ persistent=False,
+ )
+ self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False)
+
+ self.embed_dim = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.embed_dim // self.num_heads
+ self.split_size = self.embed_dim
+ if self.head_dim * self.num_heads != self.embed_dim:
+ raise ValueError(
+ f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
+ f" {self.num_heads})."
+ )
+
+ self.scale_attn_weights = config.scale_attn_weights
+ self.is_cross_attention = is_cross_attention
+
+ # Layer-wise attention scaling, reordering, and upcasting
+ self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
+ self.layer_idx = layer_idx
+ self.reorder_and_upcast_attn = config.reorder_and_upcast_attn
+
+ if self.is_cross_attention:
+ self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
+ self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
+ else:
+ self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
+ self.c_proj = Conv1D(self.embed_dim, self.embed_dim)
+
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
+ self.is_causal = True
+
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads)
+ index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
+
+ # Prune conv1d layers
+ self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
+ self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
+
+ # Update hyper params
+ self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads))
+ self.num_heads = self.num_heads - len(heads)
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None):
+ # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
+ bsz, num_heads, q_seq_len, dk = query.size()
+ _, _, k_seq_len, _ = key.size()
+
+ # Preallocate attn_weights for `baddbmm`
+ attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device)
+
+ # Compute Scale Factor
+ scale_factor = 1.0
+ if self.scale_attn_weights:
+ scale_factor /= float(value.size(-1)) ** 0.5
+
+ if self.scale_attn_by_inverse_layer_idx:
+ scale_factor /= float(self.layer_idx + 1)
+
+ # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk))
+ with torch.amp.autocast(query.device.type, enabled=False):
+ q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
+ attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
+ attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
+
+ if not self.is_cross_attention:
+ # if only "normal" attention layer implements causal mask
+ query_length, key_length = query.size(-2), key.size(-2)
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
+ mask_value = torch.finfo(attn_weights.dtype).min
+ # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
+ mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
+ attn_weights = torch.where(causal_mask, attn_weights, mask_value)
+
+ if attention_mask is not None:
+ # Apply the attention mask
+ attn_weights = attn_weights + attention_mask
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
+ if attn_weights.dtype != torch.float32:
+ raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32")
+ attn_weights = attn_weights.type(value.dtype)
+ attn_weights = self.attn_dropout(attn_weights)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attn_weights = attn_weights * head_mask
+
+ attn_output = torch.matmul(attn_weights, value)
+ attn_output = attn_output.transpose(1, 2)
+
+ return attn_output, attn_weights
+
+ def forward(
+ self,
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = False,
+ output_attentions: Optional[bool] = False,
+ **kwargs,
+ ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
+ if encoder_hidden_states is not None:
+ if not hasattr(self, "q_attn"):
+ raise ValueError(
+ "If class is used as cross attention, the weights `q_attn` have to be defined. "
+ "Please make sure to instantiate class with `DecisionTransformerGPT2Attention(..., is_cross_attention=True)`."
+ )
+
+ query_states = self.q_attn(hidden_states)
+ key_states, value_states = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
+ attention_mask = encoder_attention_mask
+ else:
+ query_states, key_states, value_states = self.c_attn(hidden_states).split(self.split_size, dim=2)
+
+ shape_q = (*query_states.shape[:-1], -1, self.head_dim)
+ shape_kv = (*key_states.shape[:-1], -1, self.head_dim)
+
+ query_states = query_states.view(shape_q).transpose(1, 2)
+ key_states = key_states.view(shape_kv).transpose(1, 2)
+ value_states = value_states.view(shape_kv).transpose(1, 2)
+
+ if layer_past is not None:
+ past_key, past_value = layer_past
+ key_states = torch.cat((past_key, key_states), dim=-2)
+ value_states = torch.cat((past_value, value_states), dim=-2)
+
+ if use_cache is True:
+ present = (key_states, value_states)
+ else:
+ present = None
+
+ is_cross_attention = encoder_hidden_states is not None
+ is_causal = attention_mask is None and query_states.shape[-2] > 1 and not is_cross_attention
+
+ using_eager = self.config._attn_implementation == "eager"
+ attention_interface: Callable = eager_attention_forward
+ if self.config._attn_implementation != "eager":
+ if self.config._attn_implementation == "sdpa" and (output_attentions or head_mask is not None):
+ using_eager = True
+ logger.warning_once(
+ "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
+ 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+ )
+ else:
+ # Attention functions are consistent with previous equivalent attention classes, however they do not support some options
+ # (e.g. layer scaling, head mask) that eager supports. These implementations are thus equivalent to previous code, but
+ # not necessarily to eager (if the mentioned options are provided).
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+ if using_eager and self.reorder_and_upcast_attn:
+ attn_output, attn_weights = self._upcast_and_reordered_attn(
+ query_states, key_states, value_states, attention_mask, head_mask
+ )
+ else:
+ attn_output, attn_weights = attention_interface(
+ self,
+ query_states,
+ key_states,
+ value_states,
+ attention_mask,
+ head_mask=head_mask,
+ dropout=self.attn_dropout.p if self.training else 0.0,
+ is_causal=is_causal,
+ **kwargs,
+ )
+
+ attn_output = attn_output.reshape(*attn_output.shape[:-2], -1).contiguous()
+ attn_output = self.c_proj(attn_output)
+ attn_output = self.resid_dropout(attn_output)
+
+ outputs = (attn_output, present)
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs # a, present, (attentions)
+
+
+# Copied from transformers.models.gpt2.modeling_gpt2.GPT2MLP with GPT2->DecisionTransformerGPT2
+class DecisionTransformerGPT2MLP(nn.Module):
+ def __init__(self, intermediate_size, config):
+ super().__init__()
+ embed_dim = config.hidden_size
+ self.c_fc = Conv1D(intermediate_size, embed_dim)
+ self.c_proj = Conv1D(embed_dim, intermediate_size)
+ self.act = ACT2FN[config.activation_function]
+ self.dropout = nn.Dropout(config.resid_pdrop)
+
+ def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
+ hidden_states = self.c_fc(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.c_proj(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.gpt2.modeling_gpt2.GPT2Block with GPT2->DecisionTransformerGPT2
+class DecisionTransformerGPT2Block(nn.Module):
+ # Ignore copy
+ def __init__(self, config, layer_idx=None):
+ super().__init__()
+ hidden_size = config.hidden_size
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
+
+ self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
+ self.attn = DecisionTransformerGPT2Attention(config, layer_idx=layer_idx)
+ self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
+
+ if config.add_cross_attention:
+ self.crossattention = DecisionTransformerGPT2Attention(
+ config, is_cross_attention=True, layer_idx=layer_idx
+ )
+ self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
+
+ self.mlp = DecisionTransformerGPT2MLP(inner_dim, config)
+
+ def forward(
+ self,
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = False,
+ output_attentions: Optional[bool] = False,
+ ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
+ residual = hidden_states
+ hidden_states = self.ln_1(hidden_states)
+ attn_outputs = self.attn(
+ hidden_states,
+ layer_past=layer_past,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+ attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
+ outputs = attn_outputs[1:]
+ # residual connection
+ hidden_states = attn_output + residual
+
+ if encoder_hidden_states is not None:
+ # add one self-attention block for cross-attention
+ if not hasattr(self, "crossattention"):
+ raise ValueError(
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
+ "cross-attention layers by setting `config.add_cross_attention=True`"
+ )
+ residual = hidden_states
+ hidden_states = self.ln_cross_attn(hidden_states)
+ cross_attn_outputs = self.crossattention(
+ hidden_states,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ output_attentions=output_attentions,
+ )
+ attn_output = cross_attn_outputs[0]
+ # residual connection
+ hidden_states = residual + attn_output
+ outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights
+
+ residual = hidden_states
+ hidden_states = self.ln_2(hidden_states)
+ feed_forward_hidden_states = self.mlp(hidden_states)
+ # residual connection
+ hidden_states = residual + feed_forward_hidden_states
+
+ if use_cache:
+ outputs = (hidden_states,) + outputs
+ else:
+ outputs = (hidden_states,) + outputs[1:]
+
+ return outputs # hidden_states, present, (attentions, cross_attentions)
+
+
+class DecisionTransformerGPT2PreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = DecisionTransformerConfig
+ load_tf_weights = load_tf_weights_in_gpt2
+ base_model_prefix = "transformer"
+ is_parallelizable = True
+ supports_gradient_checkpointing = True
+
+ def __init__(self, *inputs, **kwargs):
+ super().__init__(*inputs, **kwargs)
+
+ def _init_weights(self, module):
+ """Initialize the weights."""
+ if isinstance(module, (nn.Linear, Conv1D)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+ # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
+ # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
+ # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
+ # > -- GPT-2 :: https://openai.com/blog/better-language-models/
+ #
+ # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
+ for name, p in module.named_parameters():
+ if "c_proj" in name and "weight" in name:
+ # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
+ p.data.normal_(mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer)))
+
+
+class DecisionTransformerGPT2Model(DecisionTransformerGPT2PreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.embed_dim = config.hidden_size
+
+ self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
+ self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
+
+ self.drop = nn.Dropout(config.embd_pdrop)
+ self.h = nn.ModuleList(
+ [DecisionTransformerGPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)]
+ )
+ self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
+
+ # Model parallel
+ self.model_parallel = False
+ self.device_map = None
+ self.gradient_checkpointing = False
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.wte
+
+ def set_input_embeddings(self, new_embeddings):
+ self.wte = new_embeddings
+
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ batch_size = input_ids.shape[0]
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ batch_size = inputs_embeds.shape[0]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ if token_type_ids is not None:
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
+
+ if past_key_values is None:
+ past_length = 0
+ past_key_values = tuple([None] * len(self.h))
+ else:
+ past_length = past_key_values[0][0].size(-2)
+ if position_ids is None:
+ position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
+ position_ids = position_ids.unsqueeze(0)
+
+ # Attention mask.
+ if attention_mask is not None:
+ if batch_size <= 0:
+ raise ValueError("batch_size has to be defined and > 0")
+ attention_mask = attention_mask.view(batch_size, -1)
+ # We create a 3D attention mask from a 2D tensor mask.
+ # Sizes are [batch_size, 1, 1, to_seq_length]
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+ # this attention mask is more simple than the triangular masking of causal attention
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
+ attention_mask = attention_mask[:, None, None, :]
+
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+ # masked positions, this operation will create a tensor which is 0.0 for
+ # positions we want to attend and the dtype's smallest value for masked positions.
+ # Since we are adding it to the raw scores before the softmax, this is
+ # effectively the same as removing these entirely.
+ attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
+ attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
+
+ # If a 2D or 3D attention mask is provided for the cross-attention
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
+ encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_attention_mask = None
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # head_mask has shape n_layer x batch x n_heads x N x N
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.wte(input_ids)
+ position_embeds = self.wpe(position_ids)
+ hidden_states = inputs_embeds + position_embeds
+
+ if token_type_ids is not None:
+ token_type_embeds = self.wte(token_type_ids)
+ hidden_states = hidden_states + token_type_embeds
+
+ hidden_states = self.drop(hidden_states)
+
+ output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ presents = () if use_cache else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
+ all_hidden_states = () if output_hidden_states else None
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
+ # Model parallel
+ if self.model_parallel:
+ torch.cuda.set_device(hidden_states.device)
+ # Ensure layer_past is on same device as hidden_states (might not be correct)
+ if layer_past is not None:
+ layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
+ # Ensure that attention_mask is always on the same device as hidden_states
+ if attention_mask is not None:
+ attention_mask = attention_mask.to(hidden_states.device)
+ if isinstance(head_mask, torch.Tensor):
+ head_mask = head_mask.to(hidden_states.device)
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ outputs = self._gradient_checkpointing_func(
+ block.__call__,
+ hidden_states,
+ None,
+ attention_mask,
+ head_mask[i],
+ encoder_hidden_states,
+ encoder_attention_mask,
+ use_cache,
+ output_attentions,
+ )
+ else:
+ outputs = block(
+ hidden_states,
+ layer_past=layer_past,
+ attention_mask=attention_mask,
+ head_mask=head_mask[i],
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = outputs[0]
+ if use_cache is True:
+ presents = presents + (outputs[1],)
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
+ if self.config.add_cross_attention:
+ all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
+
+ # Model Parallel: If it's the last layer for that device, put things on the next device
+ if self.model_parallel:
+ for k, v in self.device_map.items():
+ if i == v[-1] and "cuda:" + str(k) != self.last_device:
+ hidden_states = hidden_states.to("cuda:" + str(k + 1))
+
+ hidden_states = self.ln_f(hidden_states)
+
+ hidden_states = hidden_states.view(output_shape)
+ # Add last hidden state
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
+ if v is not None
+ )
+
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=presents,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+@dataclass
+class DecisionTransformerOutput(ModelOutput):
+ """
+ Base class for [`DecisionTransformerModel`] outputs, containing the predicted states, actions and returns as well as the encoder hidden states and attentions.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ state_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, state_dim)`):
+ Environment state predictions
+ action_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, action_dim)`):
+ Model action predictions
+ return_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, 1)`):
+ Predicted returns for each state
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ state_preds: torch.FloatTensor = None
+ action_preds: torch.FloatTensor = None
+ return_preds: torch.FloatTensor = None
+ hidden_states: torch.FloatTensor = None
+ attentions: torch.FloatTensor = None
+ last_hidden_state: torch.FloatTensor = None
+
+
+class DecisionTransformerPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = DecisionTransformerConfig
+ base_model_prefix = "decision_transformer"
+ main_input_name = "states"
+ supports_gradient_checkpointing = False
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+DECISION_TRANSFORMER_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`~DecisionTransformerConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+DECISION_TRANSFORMER_INPUTS_DOCSTRING = r"""
+ Args:
+ states (`torch.FloatTensor` of shape `(batch_size, episode_length, state_dim)`):
+ The states for each step in the trajectory
+ actions (`torch.FloatTensor` of shape `(batch_size, episode_length, act_dim)`):
+ The actions taken by the "expert" policy for the current state, these are masked for auto regressive
+ prediction
+ rewards (`torch.FloatTensor` of shape `(batch_size, episode_length, 1)`):
+ The rewards for each state, action
+ returns_to_go (`torch.FloatTensor` of shape `(batch_size, episode_length, 1)`):
+ The returns for each state in the trajectory
+ timesteps (`torch.LongTensor` of shape `(batch_size, episode_length)`):
+ The timestep for each step in the trajectory
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, episode_length)`):
+ Masking, used to mask the actions when performing autoregressive prediction
+"""
+
+
+@add_start_docstrings("The Decision Transformer Model", DECISION_TRANSFORMER_START_DOCSTRING)
+class DecisionTransformerModel(DecisionTransformerPreTrainedModel):
+ """
+
+ The model builds upon the GPT2 architecture to perform autoregressive prediction of actions in an offline RL
+ setting. Refer to the paper for more details: https://arxiv.org/abs/2106.01345
+
+ """
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+ self.hidden_size = config.hidden_size
+ # note: the only difference between this GPT2Model and the default Huggingface version
+ # is that the positional embeddings are removed (since we'll add those ourselves)
+ self.encoder = DecisionTransformerGPT2Model(config)
+
+ self.embed_timestep = nn.Embedding(config.max_ep_len, config.hidden_size)
+ self.embed_return = torch.nn.Linear(1, config.hidden_size)
+ self.embed_state = torch.nn.Linear(config.state_dim, config.hidden_size)
+ self.embed_action = torch.nn.Linear(config.act_dim, config.hidden_size)
+
+ self.embed_ln = nn.LayerNorm(config.hidden_size)
+
+ # note: we don't predict states or returns for the paper
+ self.predict_state = torch.nn.Linear(config.hidden_size, config.state_dim)
+ self.predict_action = nn.Sequential(
+ *([nn.Linear(config.hidden_size, config.act_dim)] + ([nn.Tanh()] if config.action_tanh else []))
+ )
+ self.predict_return = torch.nn.Linear(config.hidden_size, 1)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(DECISION_TRANSFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=DecisionTransformerOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ states: Optional[torch.FloatTensor] = None,
+ actions: Optional[torch.FloatTensor] = None,
+ rewards: Optional[torch.FloatTensor] = None,
+ returns_to_go: Optional[torch.FloatTensor] = None,
+ timesteps: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], DecisionTransformerOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import DecisionTransformerModel
+ >>> import torch
+
+ >>> model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-medium")
+ >>> # evaluation
+ >>> model = model.to(device)
+ >>> model.eval()
+
+ >>> env = gym.make("Hopper-v3")
+ >>> state_dim = env.observation_space.shape[0]
+ >>> act_dim = env.action_space.shape[0]
+
+ >>> state = env.reset()
+ >>> states = torch.from_numpy(state).reshape(1, 1, state_dim).to(device=device, dtype=torch.float32)
+ >>> actions = torch.zeros((1, 1, act_dim), device=device, dtype=torch.float32)
+ >>> rewards = torch.zeros(1, 1, device=device, dtype=torch.float32)
+ >>> target_return = torch.tensor(TARGET_RETURN, dtype=torch.float32).reshape(1, 1)
+ >>> timesteps = torch.tensor(0, device=device, dtype=torch.long).reshape(1, 1)
+ >>> attention_mask = torch.zeros(1, 1, device=device, dtype=torch.float32)
+
+ >>> # forward pass
+ >>> with torch.no_grad():
+ ... state_preds, action_preds, return_preds = model(
+ ... states=states,
+ ... actions=actions,
+ ... rewards=rewards,
+ ... returns_to_go=target_return,
+ ... timesteps=timesteps,
+ ... attention_mask=attention_mask,
+ ... return_dict=False,
+ ... )
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ batch_size, seq_length = states.shape[0], states.shape[1]
+
+ if attention_mask is None:
+ # attention mask for GPT: 1 if can be attended to, 0 if not
+ attention_mask = torch.ones((batch_size, seq_length), dtype=torch.long)
+
+ # embed each modality with a different head
+ state_embeddings = self.embed_state(states)
+ action_embeddings = self.embed_action(actions)
+ returns_embeddings = self.embed_return(returns_to_go)
+ time_embeddings = self.embed_timestep(timesteps)
+
+ # time embeddings are treated similar to positional embeddings
+ state_embeddings = state_embeddings + time_embeddings
+ action_embeddings = action_embeddings + time_embeddings
+ returns_embeddings = returns_embeddings + time_embeddings
+
+ # this makes the sequence look like (R_1, s_1, a_1, R_2, s_2, a_2, ...)
+ # which works nicely in an autoregressive sense since states predict actions
+ stacked_inputs = (
+ torch.stack((returns_embeddings, state_embeddings, action_embeddings), dim=1)
+ .permute(0, 2, 1, 3)
+ .reshape(batch_size, 3 * seq_length, self.hidden_size)
+ )
+ stacked_inputs = self.embed_ln(stacked_inputs)
+
+ # to make the attention mask fit the stacked inputs, have to stack it as well
+ stacked_attention_mask = (
+ torch.stack((attention_mask, attention_mask, attention_mask), dim=1)
+ .permute(0, 2, 1)
+ .reshape(batch_size, 3 * seq_length)
+ )
+ device = stacked_inputs.device
+ # we feed in the input embeddings (not word indices as in NLP) to the model
+ encoder_outputs = self.encoder(
+ inputs_embeds=stacked_inputs,
+ attention_mask=stacked_attention_mask,
+ position_ids=torch.zeros(stacked_attention_mask.shape, device=device, dtype=torch.long),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ x = encoder_outputs[0]
+
+ # reshape x so that the second dimension corresponds to the original
+ # returns (0), states (1), or actions (2); i.e. x[:,1,t] is the token for s_t
+ x = x.reshape(batch_size, seq_length, 3, self.hidden_size).permute(0, 2, 1, 3)
+
+ # get predictions
+ return_preds = self.predict_return(x[:, 2]) # predict next return given state and action
+ state_preds = self.predict_state(x[:, 2]) # predict next state given state and action
+ action_preds = self.predict_action(x[:, 1]) # predict next action given state
+ if not return_dict:
+ return (state_preds, action_preds, return_preds)
+
+ return DecisionTransformerOutput(
+ last_hidden_state=encoder_outputs.last_hidden_state,
+ state_preds=state_preds,
+ action_preds=action_preds,
+ return_preds=return_preds,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+__all__ = [
+ "DecisionTransformerGPT2Model",
+ "DecisionTransformerGPT2PreTrainedModel",
+ "DecisionTransformerModel",
+ "DecisionTransformerPreTrainedModel",
+]
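A minimal usage sketch of the model defined above, using a small randomly initialized configuration rather than a pretrained checkpoint; the `state_dim`/`act_dim` values are arbitrary and chosen only for illustration:

```python
import torch
from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig(state_dim=11, act_dim=3, max_ep_len=100)
model = DecisionTransformerModel(config).eval()

batch_size, seq_length = 1, 5
states = torch.randn(batch_size, seq_length, config.state_dim)
actions = torch.zeros(batch_size, seq_length, config.act_dim)
returns_to_go = torch.zeros(batch_size, seq_length, 1)
timesteps = torch.arange(seq_length).unsqueeze(0)    # (1, 5), long dtype
attention_mask = torch.ones(batch_size, seq_length)  # 1 = can be attended to

with torch.no_grad():
    outputs = model(
        states=states,
        actions=actions,
        returns_to_go=returns_to_go,
        timesteps=timesteps,
        attention_mask=attention_mask,
        return_dict=True,
    )

print(outputs.action_preds.shape)  # torch.Size([1, 5, 3])
```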
diff --git a/janus/lib/python3.10/site-packages/transformers/models/ernie/__init__.py b/janus/lib/python3.10/site-packages/transformers/models/ernie/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9bb8983063ddb0117e8b0d7cd6603aa6ac3056b6
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/ernie/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+ from .configuration_ernie import *
+ from .modeling_ernie import *
+else:
+ import sys
+
+ _file = globals()["__file__"]
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/janus/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..48b75c499506f6ec609100907b48697bb47c5a08
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/configuration_ernie.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/configuration_ernie.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..da0194e5155306eec4730c35fca2ad75e318d954
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/configuration_ernie.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/modeling_ernie.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/modeling_ernie.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..20b26d27eb1368d42bb8b360752a34b127e0493c
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/modeling_ernie.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/ernie/configuration_ernie.py b/janus/lib/python3.10/site-packages/transformers/models/ernie/configuration_ernie.py
new file mode 100644
index 0000000000000000000000000000000000000000..655e40e163b59dac4f2cab5fe96265b2173478c1
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/ernie/configuration_ernie.py
@@ -0,0 +1,163 @@
+# coding=utf-8
+# Copyright 2022 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""ERNIE model configuration"""
+
+from collections import OrderedDict
+from typing import Mapping
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class ErnieConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`ErnieModel`] or a [`TFErnieModel`]. It is used to
+ instantiate an ERNIE model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the ERNIE
+ [nghuyong/ernie-3.0-base-zh](https://huggingface.co/nghuyong/ernie-3.0-base-zh) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the ERNIE model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`ErnieModel`] or [`TFErnieModel`].
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ type_vocab_size (`int`, *optional*, defaults to 2):
+ The vocabulary size of the `token_type_ids` passed when calling [`ErnieModel`] or [`TFErnieModel`].
+ task_type_vocab_size (`int`, *optional*, defaults to 3):
+ The vocabulary size of the `task_type_ids` for the ERNIE 2.0/ERNIE 3.0 models.
+ use_task_id (`bool`, *optional*, defaults to `False`):
+ Whether or not the model supports `task_type_ids`.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ pad_token_id (`int`, *optional*, defaults to 0):
+ Padding token id.
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ classifier_dropout (`float`, *optional*):
+ The dropout ratio for the classification head.
+
+ Examples:
+
+ ```python
+ >>> from transformers import ErnieConfig, ErnieModel
+
+ >>> # Initializing an ERNIE nghuyong/ernie-3.0-base-zh style configuration
+ >>> configuration = ErnieConfig()
+
+ >>> # Initializing a model (with random weights) from the nghuyong/ernie-3.0-base-zh style configuration
+ >>> model = ErnieModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "ernie"
+
+ def __init__(
+ self,
+ vocab_size=30522,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ type_vocab_size=2,
+ task_type_vocab_size=3,
+ use_task_id=False,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ pad_token_id=0,
+ position_embedding_type="absolute",
+ use_cache=True,
+ classifier_dropout=None,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.type_vocab_size = type_vocab_size
+ self.task_type_vocab_size = task_type_vocab_size
+ self.use_task_id = use_task_id
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.position_embedding_type = position_embedding_type
+ self.use_cache = use_cache
+ self.classifier_dropout = classifier_dropout
+
+
+class ErnieOnnxConfig(OnnxConfig):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ if self.task == "multiple-choice":
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
+ else:
+ dynamic_axis = {0: "batch", 1: "sequence"}
+ return OrderedDict(
+ [
+ ("input_ids", dynamic_axis),
+ ("attention_mask", dynamic_axis),
+ ("token_type_ids", dynamic_axis),
+ ("task_type_ids", dynamic_axis),
+ ]
+ )
+
+
+__all__ = ["ErnieConfig", "ErnieOnnxConfig"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/ernie/modeling_ernie.py b/janus/lib/python3.10/site-packages/transformers/models/ernie/modeling_ernie.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec090b712e4420723b27b9f7e19c5f6b4e440382
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/ernie/modeling_ernie.py
@@ -0,0 +1,1815 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch ERNIE model."""
+
+import math
+import warnings
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...generation import GenerationMixin
+from ...modeling_outputs import (
+ BaseModelOutputWithPastAndCrossAttentions,
+ BaseModelOutputWithPoolingAndCrossAttentions,
+ CausalLMOutputWithCrossAttentions,
+ MaskedLMOutput,
+ MultipleChoiceModelOutput,
+ NextSentencePredictorOutput,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_ernie import ErnieConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "nghuyong/ernie-1.0-base-zh"
+_CONFIG_FOR_DOC = "ErnieConfig"
+
+
+class ErnieEmbeddings(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+ self.use_task_id = config.use_task_id
+ if config.use_task_id:
+ self.task_type_embeddings = nn.Embedding(config.task_type_vocab_size, config.hidden_size)
+
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+ self.register_buffer(
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
+ )
+
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ task_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ past_key_values_length: int = 0,
+ ) -> torch.Tensor:
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
+ if position_ids is None:
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
+
+ # When token_type_ids is not provided, fall back to the all-zeros buffer registered in the constructor
+ # (which usually occurs when it is auto-generated). The registered buffer lets users trace the model
+ # without passing token_type_ids and solves issue #5664
+ if token_type_ids is None:
+ if hasattr(self, "token_type_ids"):
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+ embeddings = inputs_embeds + token_type_embeddings
+ if self.position_embedding_type == "absolute":
+ position_embeddings = self.position_embeddings(position_ids)
+ embeddings += position_embeddings
+
+ # add `task_type_id` for ERNIE model
+ if self.use_task_id:
+ if task_type_ids is None:
+ task_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+ task_type_embeddings = self.task_type_embeddings(task_type_ids)
+ embeddings += task_type_embeddings
+
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Ernie
+class ErnieSelfAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.position_embedding_type = position_embedding_type or getattr(
+ config, "position_embedding_type", "absolute"
+ )
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ self.max_position_embeddings = config.max_position_embeddings
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
+
+ self.is_decoder = config.is_decoder
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
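+ # Reshape from (batch_size, seq_len, all_head_size) to (batch_size, num_heads, seq_len, head_size)
+ # so attention scores can be computed independently for every head.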
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ mixed_query_layer = self.query(hidden_states)
+
+ # If this is instantiated as a cross-attention module, the keys
+ # and values come from an encoder; the attention mask needs to be
+ # such that the encoder's padding tokens are not attended to.
+ is_cross_attention = encoder_hidden_states is not None
+
+ if is_cross_attention and past_key_value is not None:
+ # reuse cached key/value states from cross-attention
+ key_layer = past_key_value[0]
+ value_layer = past_key_value[1]
+ attention_mask = encoder_attention_mask
+ elif is_cross_attention:
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
+ attention_mask = encoder_attention_mask
+ elif past_key_value is not None:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
+ else:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ use_cache = past_key_value is not None
+ if self.is_decoder:
+ # if cross-attention: save Tuple(torch.Tensor, torch.Tensor) of all cross-attention key/value states.
+ # Further calls to the cross-attention layer can then reuse all cross-attention
+ # key/value states (first "if" case)
+ # if uni-directional self-attention (decoder): save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value states. Further calls to uni-directional self-attention
+ # can concatenate previous decoder key/value states to the current projected key/value states (third "elif" case)
+ # if encoder bi-directional self-attention: `past_key_value` is always `None`
+ past_key_value = (key_layer, value_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
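+ # For relative position embeddings, add scores derived from learned embeddings of the
+ # pairwise distance between query and key positions.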
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
+ if use_cache:
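+ # With a cache, only the last query position is present, so its index is key_length - 1.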
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
+ -1, 1
+ )
+ else:
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
+ distance = position_ids_l - position_ids_r
+
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
+
+ if self.position_embedding_type == "relative_key":
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores
+ elif self.position_embedding_type == "relative_key_query":
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in the ErnieModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ if self.is_decoder:
+ outputs = outputs + (past_key_value,)
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Ernie
+class ErnieSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
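+# Maps `config._attn_implementation` to the attention implementation; only the eager
+# (pure PyTorch) attention is defined for Ernie here.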
+ERNIE_SELF_ATTENTION_CLASSES = {
+ "eager": ErnieSelfAttention,
+}
+
+
+# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Ernie,BERT->ERNIE
+class ErnieAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ self.self = ERNIE_SELF_ATTENTION_CLASSES[config._attn_implementation](
+ config, position_embedding_type=position_embedding_type
+ )
+ self.output = ErnieSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.self(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Ernie
+class ErnieIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Ernie
+class ErnieOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Ernie
+class ErnieLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = ErnieAttention(config)
+ self.is_decoder = config.is_decoder
+ self.add_cross_attention = config.add_cross_attention
+ if self.add_cross_attention:
+ if not self.is_decoder:
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
+ self.crossattention = ErnieAttention(config, position_embedding_type="absolute")
+ self.intermediate = ErnieIntermediate(config)
+ self.output = ErnieOutput(config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ self_attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ past_key_value=self_attn_past_key_value,
+ )
+ attention_output = self_attention_outputs[0]
+
+ # if decoder, the last output is tuple of self-attn cache
+ if self.is_decoder:
+ outputs = self_attention_outputs[1:-1]
+ present_key_value = self_attention_outputs[-1]
+ else:
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ cross_attn_present_key_value = None
+ if self.is_decoder and encoder_hidden_states is not None:
+ if not hasattr(self, "crossattention"):
+ raise ValueError(
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
+ " by setting `config.add_cross_attention=True`"
+ )
+
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ cross_attention_outputs = self.crossattention(
+ attention_output,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ cross_attn_past_key_value,
+ output_attentions,
+ )
+ attention_output = cross_attention_outputs[0]
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
+
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
+ cross_attn_present_key_value = cross_attention_outputs[-1]
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
+ )
+ outputs = (layer_output,) + outputs
+
+ # if decoder, return the attn key/values as the last output
+ if self.is_decoder:
+ outputs = outputs + (present_key_value,)
+
+ return outputs
+
+ def feed_forward_chunk(self, attention_output):
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
+
+
+# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Ernie
+class ErnieEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([ErnieLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ next_decoder_cache = () if use_cache else None
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+ past_key_value = past_key_values[i] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+ if use_cache:
+ next_decoder_cache += (layer_outputs[-1],)
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+ if self.config.add_cross_attention:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ next_decoder_cache,
+ all_hidden_states,
+ all_self_attentions,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_decoder_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->Ernie
+class ErniePooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->Ernie
+class ErniePredictionHeadTransform(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ if isinstance(config.hidden_act, str):
+ self.transform_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.transform_act_fn = config.hidden_act
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.transform_act_fn(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Ernie
+class ErnieLMPredictionHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.transform = ErniePredictionHeadTransform(config)
+
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
+ self.decoder.bias = self.bias
+
+ def _tie_weights(self):
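+ # Re-link the bias to the decoder so it stays in sync when the decoder weights are re-tied or resized.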
+ self.decoder.bias = self.bias
+
+ def forward(self, hidden_states):
+ hidden_states = self.transform(hidden_states)
+ hidden_states = self.decoder(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Ernie
+class ErnieOnlyMLMHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.predictions = ErnieLMPredictionHead(config)
+
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
+ prediction_scores = self.predictions(sequence_output)
+ return prediction_scores
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->Ernie
+class ErnieOnlyNSPHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
+
+ def forward(self, pooled_output):
+ seq_relationship_score = self.seq_relationship(pooled_output)
+ return seq_relationship_score
+
+
+# Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->Ernie
+class ErniePreTrainingHeads(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.predictions = ErnieLMPredictionHead(config)
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
+
+ def forward(self, sequence_output, pooled_output):
+ prediction_scores = self.predictions(sequence_output)
+ seq_relationship_score = self.seq_relationship(pooled_output)
+ return prediction_scores, seq_relationship_score
+
+
+class ErniePreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = ErnieConfig
+ base_model_prefix = "ernie"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+@dataclass
+# Copied from transformers.models.bert.modeling_bert.BertForPreTrainingOutput with Bert->Ernie
+class ErnieForPreTrainingOutput(ModelOutput):
+ """
+ Output type of [`ErnieForPreTraining`].
+
+ Args:
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
+ (classification) loss.
+ prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
+ before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ prediction_logits: torch.FloatTensor = None
+ seq_relationship_logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+ERNIE_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
+ heads, etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`ErnieConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+ERNIE_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ task_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Task type embedding is a special embedding to represent the characteristic of different tasks, such as
+ word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
+ assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
+ config.task_type_vocab_size-1]`.
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Ernie Model transformer outputting raw hidden-states without any specific head on top.",
+ ERNIE_START_DOCSTRING,
+)
+class ErnieModel(ErniePreTrainedModel):
+ """
+
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
+
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument
+ and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward
+ pass.
+ """
+
+ # Copied from transformers.models.clap.modeling_clap.ClapTextModel.__init__ with ClapText->Ernie
+ def __init__(self, config, add_pooling_layer=True):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = ErnieEmbeddings(config)
+ self.encoder = ErnieEncoder(config)
+
+ self.pooler = ErniePooler(config) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Copied from transformers.models.bert.modeling_bert.BertModel.get_input_embeddings
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ # Copied from transformers.models.bert.modeling_bert.BertModel.set_input_embeddings
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ # Copied from transformers.models.bert.modeling_bert.BertModel._prune_heads
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
+ base class `PreTrainedModel`.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ task_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if self.config.is_decoder:
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ else:
+ use_cache = False
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ batch_size, seq_length = input_shape
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if attention_mask is None:
+ attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
+
+ if token_type_ids is None:
+ if hasattr(self.embeddings, "token_type_ids"):
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
+
+ # If a 2D or 3D attention mask is provided for the cross-attention
+ # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if self.config.is_decoder and encoder_hidden_states is not None:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = None
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicates we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ token_type_ids=token_type_ids,
+ task_type_ids=task_type_ids,
+ inputs_embeds=inputs_embeds,
+ past_key_values_length=past_key_values_length,
+ )
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPoolingAndCrossAttentions(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ past_key_values=encoder_outputs.past_key_values,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ cross_attentions=encoder_outputs.cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Ernie Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
+ sentence prediction (classification)` head.
+ """,
+ ERNIE_START_DOCSTRING,
+)
+class ErnieForPreTraining(ErniePreTrainedModel):
+ _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]
+
+ # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.__init__ with Bert->Ernie,bert->ernie
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.ernie = ErnieModel(config)
+ self.cls = ErniePreTrainingHeads(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.get_output_embeddings
+ def get_output_embeddings(self):
+ return self.cls.predictions.decoder
+
+ # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.set_output_embeddings
+ def set_output_embeddings(self, new_embeddings):
+ self.cls.predictions.decoder = new_embeddings
+ self.cls.predictions.bias = new_embeddings.bias
+
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=ErnieForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ task_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ next_sentence_label: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], ErnieForPreTrainingOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
+ the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence
+ pair (see `input_ids` docstring). Indices should be in `[0, 1]`:
+
+ - 0 indicates sequence B is a continuation of sequence A,
+ - 1 indicates sequence B is a random sequence.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, ErnieForPreTraining
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
+ >>> model = ErnieForPreTraining.from_pretrained("nghuyong/ernie-1.0-base-zh")
+
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> prediction_logits = outputs.prediction_logits
+ >>> seq_relationship_logits = outputs.seq_relationship_logits
+ ```
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.ernie(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ task_type_ids=task_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output, pooled_output = outputs[:2]
+ prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
+
+ total_loss = None
+ if labels is not None and next_sentence_label is not None:
+ loss_fct = CrossEntropyLoss()
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+ next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
+ total_loss = masked_lm_loss + next_sentence_loss
+
+ if not return_dict:
+ output = (prediction_scores, seq_relationship_score) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return ErnieForPreTrainingOutput(
+ loss=total_loss,
+ prediction_logits=prediction_scores,
+ seq_relationship_logits=seq_relationship_score,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """Ernie Model with a `language modeling` head on top for CLM fine-tuning.""", ERNIE_START_DOCSTRING
+)
+class ErnieForCausalLM(ErniePreTrainedModel, GenerationMixin):
+ _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]
+
+ # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.__init__ with BertLMHeadModel->ErnieForCausalLM,Bert->Ernie,bert->ernie
+ def __init__(self, config):
+ super().__init__(config)
+
+ if not config.is_decoder:
+ logger.warning("If you want to use `ErnieForCausalLM` as a standalone, add `is_decoder=True.`")
+
+ self.ernie = ErnieModel(config, add_pooling_layer=False)
+ self.cls = ErnieOnlyMLMHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.get_output_embeddings
+ def get_output_embeddings(self):
+ return self.cls.predictions.decoder
+
+ # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.set_output_embeddings
+ def set_output_embeddings(self, new_embeddings):
+ self.cls.predictions.decoder = new_embeddings
+ self.cls.predictions.bias = new_embeddings.bias
+
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=CausalLMOutputWithCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ task_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.Tensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
+ ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ if labels is not None:
+ use_cache = False
+
+ outputs = self.ernie(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ task_type_ids=task_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ prediction_scores = self.cls(sequence_output)
+
+ lm_loss = None
+ if labels is not None:
+ # we are doing next-token prediction; shift prediction scores and input ids by one
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
+ labels = labels[:, 1:].contiguous()
+ loss_fct = CrossEntropyLoss()
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((lm_loss,) + output) if lm_loss is not None else output
+
+ return CausalLMOutputWithCrossAttentions(
+ loss=lm_loss,
+ logits=prediction_scores,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+ # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel._reorder_cache
+ def _reorder_cache(self, past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
+
+
+@add_start_docstrings("""Ernie Model with a `language modeling` head on top.""", ERNIE_START_DOCSTRING)
+class ErnieForMaskedLM(ErniePreTrainedModel):
+ _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]
+
+ # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.__init__ with Bert->Ernie,bert->ernie
+ def __init__(self, config):
+ super().__init__(config)
+
+ if config.is_decoder:
+ logger.warning(
+ "If you want to use `ErnieForMaskedLM` make sure `config.is_decoder=False` for "
+ "bi-directional self-attention."
+ )
+
+ self.ernie = ErnieModel(config, add_pooling_layer=False)
+ self.cls = ErnieOnlyMLMHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.get_output_embeddings
+ def get_output_embeddings(self):
+ return self.cls.predictions.decoder
+
+ # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.set_output_embeddings
+ def set_output_embeddings(self, new_embeddings):
+ self.cls.predictions.decoder = new_embeddings
+ self.cls.predictions.bias = new_embeddings.bias
+
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output="'paris'",
+ expected_loss=0.88,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ task_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.ernie(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ task_type_ids=task_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ prediction_scores = self.cls(sequence_output)
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return MaskedLMOutput(
+ loss=masked_lm_loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.prepare_inputs_for_generation
+ def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
+ input_shape = input_ids.shape
+ effective_batch_size = input_shape[0]
+
+ # add a dummy token
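+ # For MLM-style generation, a PAD token is appended so the model has an extra position to predict,
+ # and the attention mask is extended with a zero for that dummy position.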
+ if self.config.pad_token_id is None:
+ raise ValueError("The PAD token should be defined for generation")
+
+ attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
+ dummy_token = torch.full(
+ (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
+ )
+ input_ids = torch.cat([input_ids, dummy_token], dim=1)
+
+ return {"input_ids": input_ids, "attention_mask": attention_mask}
+
+
+@add_start_docstrings(
+ """Ernie Model with a `next sentence prediction (classification)` head on top.""",
+ ERNIE_START_DOCSTRING,
+)
+class ErnieForNextSentencePrediction(ErniePreTrainedModel):
+ # Copied from transformers.models.bert.modeling_bert.BertForNextSentencePrediction.__init__ with Bert->Ernie,bert->ernie
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.ernie = ErnieModel(config)
+ self.cls = ErnieOnlyNSPHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ task_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Tuple[torch.Tensor], NextSentencePredictorOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
+ (see `input_ids` docstring). Indices should be in `[0, 1]`:
+
+ - 0 indicates sequence B is a continuation of sequence A,
+ - 1 indicates sequence B is a random sequence.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, ErnieForNextSentencePrediction
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
+ >>> model = ErnieForNextSentencePrediction.from_pretrained("nghuyong/ernie-1.0-base-zh")
+
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
+
+ >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
+ >>> logits = outputs.logits
+ >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
+ ```
+ """
+
+ if "next_sentence_label" in kwargs:
+ warnings.warn(
+ "The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
+ " `labels` instead.",
+ FutureWarning,
+ )
+ labels = kwargs.pop("next_sentence_label")
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.ernie(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ task_type_ids=task_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ seq_relationship_scores = self.cls(pooled_output)
+
+ next_sentence_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
+
+ if not return_dict:
+ output = (seq_relationship_scores,) + outputs[2:]
+ return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
+
+ return NextSentencePredictorOutput(
+ loss=next_sentence_loss,
+ logits=seq_relationship_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Ernie Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
+ output) e.g. for GLUE tasks.
+ """,
+ ERNIE_START_DOCSTRING,
+)
+class ErnieForSequenceClassification(ErniePreTrainedModel):
+ # Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification.__init__ with Bert->Ernie,bert->ernie
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.config = config
+
+ self.ernie = ErnieModel(config)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ task_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), if
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.ernie(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ task_type_ids=task_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
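+ # Infer the problem type (regression, single-label or multi-label classification) from
+ # `num_labels` and the label dtype when it is not set explicitly in the config.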
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Ernie Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
+ softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ ERNIE_START_DOCSTRING,
+)
+class ErnieForMultipleChoice(ErniePreTrainedModel):
+ # Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice.__init__ with Bert->Ernie,bert->ernie
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.ernie = ErnieModel(config)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, 1)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ task_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+ `input_ids` above)
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
+ inputs_embeds = (
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+ if inputs_embeds is not None
+ else None
+ )
+
+ outputs = self.ernie(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ task_type_ids=task_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+ reshaped_logits = logits.view(-1, num_choices)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(reshaped_logits, labels)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return MultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
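+# A minimal usage sketch for `ErnieForMultipleChoice` (illustrative only; assumes the
+# `nghuyong/ernie-1.0-base-zh` checkpoint). Inputs are shaped `(batch_size, num_choices, sequence_length)`
+# and the head produces one score per choice:
+#
+#     from transformers import AutoTokenizer, ErnieForMultipleChoice
+#     import torch
+#
+#     tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
+#     model = ErnieForMultipleChoice.from_pretrained("nghuyong/ernie-1.0-base-zh")
+#     prompt = "The cat sat on"
+#     choices = ["the mat.", "the moon."]
+#     encoding = tokenizer([prompt, prompt], choices, return_tensors="pt", padding=True)
+#     inputs = {k: v.unsqueeze(0) for k, v in encoding.items()}  # add the num_choices dimension
+#     with torch.no_grad():
+#         logits = model(**inputs).logits  # shape (1, 2)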
+
+@add_start_docstrings(
+ """
+ Ernie Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ ERNIE_START_DOCSTRING,
+)
+class ErnieForTokenClassification(ErniePreTrainedModel):
+ # Copied from transformers.models.bert.modeling_bert.BertForTokenClassification.__init__ with Bert->Ernie,bert->ernie
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.ernie = ErnieModel(config, add_pooling_layer=False)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ task_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.ernie(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ task_type_ids=task_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
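+# A minimal usage sketch for `ErnieForTokenClassification` (illustrative only; assumes the
+# `nghuyong/ernie-1.0-base-zh` checkpoint and an untrained 5-label head), predicting one label per token:
+#
+#     from transformers import AutoTokenizer, ErnieForTokenClassification
+#     import torch
+#
+#     tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
+#     model = ErnieForTokenClassification.from_pretrained("nghuyong/ernie-1.0-base-zh", num_labels=5)
+#     inputs = tokenizer("An example sentence.", return_tensors="pt")
+#     with torch.no_grad():
+#         logits = model(**inputs).logits  # shape (1, sequence_length, 5)
+#     predicted_token_classes = logits.argmax(-1)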
+
+@add_start_docstrings(
+ """
+ Ernie Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ ERNIE_START_DOCSTRING,
+)
+class ErnieForQuestionAnswering(ErniePreTrainedModel):
+ # Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering.__init__ with Bert->Ernie,bert->ernie
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.ernie = ErnieModel(config, add_pooling_layer=False)
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ task_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.Tensor] = None,
+ end_positions: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.ernie(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ task_type_ids=task_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, split add a dimension
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
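+# A minimal usage sketch for `ErnieForQuestionAnswering` (illustrative only; assumes the
+# `nghuyong/ernie-1.0-base-zh` checkpoint with an untrained QA head). The answer span is read off the
+# argmax of the start/end logits:
+#
+#     from transformers import AutoTokenizer, ErnieForQuestionAnswering
+#     import torch
+#
+#     tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
+#     model = ErnieForQuestionAnswering.from_pretrained("nghuyong/ernie-1.0-base-zh")
+#     question, context = "Who created ERNIE?", "ERNIE was created by Baidu."
+#     inputs = tokenizer(question, context, return_tensors="pt")
+#     with torch.no_grad():
+#         outputs = model(**inputs)
+#     start = outputs.start_logits.argmax()
+#     end = outputs.end_logits.argmax()
+#     answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])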
+
+__all__ = [
+ "ErnieForCausalLM",
+ "ErnieForMaskedLM",
+ "ErnieForMultipleChoice",
+ "ErnieForNextSentencePrediction",
+ "ErnieForPreTraining",
+ "ErnieForQuestionAnswering",
+ "ErnieForSequenceClassification",
+ "ErnieForTokenClassification",
+ "ErnieModel",
+ "ErniePreTrainedModel",
+]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/falcon/__init__.py b/janus/lib/python3.10/site-packages/transformers/models/falcon/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9789767f11402264660b5dec0b5cae2466ee9d8
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/falcon/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+ from .configuration_falcon import *
+ from .modeling_falcon import *
+else:
+ import sys
+
+ _file = globals()["__file__"]
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/janus/lib/python3.10/site-packages/transformers/models/falcon/__pycache__/configuration_falcon.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/falcon/__pycache__/configuration_falcon.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ee625ed37f2da6ec82d9b95d998683f1b84a0926
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/falcon/__pycache__/configuration_falcon.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..377b2843bc06f76accf02f29cdb90b147659b769
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/modeling_fuyu.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/modeling_fuyu.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7aec737afdc35ffd0a9b3e6c6f70c9a8d9b4bc3c
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/modeling_fuyu.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/processing_fuyu.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/processing_fuyu.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..905f00131d87ebba76d3834d3a9914dbc0ec3853
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/fuyu/__pycache__/processing_fuyu.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/fuyu/configuration_fuyu.py b/janus/lib/python3.10/site-packages/transformers/models/fuyu/configuration_fuyu.py
new file mode 100644
index 0000000000000000000000000000000000000000..23c3d88a8ec1f40f227be4ad299243ef34ef1b10
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/fuyu/configuration_fuyu.py
@@ -0,0 +1,210 @@
+# coding=utf-8
+# Copyright 2023 Adept AI and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Fuyu model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+from ..auto import CONFIG_MAPPING
+
+
+logger = logging.get_logger(__name__)
+
+
+class FuyuConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`FuyuForCausalLM`]. It is used to instantiate a
+ Fuyu model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the
+ [adept/fuyu-8b](https://huggingface.co/adept/fuyu-8b).
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 262144):
+ Vocabulary size of the Fuyu model. Defines the number of different tokens that can be represented by the
+ `input_ids` passed when calling [`FuyuForCausalLM`].
+ hidden_size (`int`, *optional*, defaults to 4096):
+ Dimension of the hidden representations.
+ intermediate_size (`int`, *optional*, defaults to 16384):
+ Dimension of the MLP representations.
+ num_hidden_layers (`int`, *optional*, defaults to 36):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 64):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"relu2"`):
+ The non-linear activation function (function or string) in the decoder.
+ max_position_embeddings (`int`, *optional*, defaults to 16384):
+ The maximum sequence length that this model might ever be used with.
+ image_size (`int`, *optional*, defaults to 300):
+ The input image size.
+ patch_size (`int`, *optional*, defaults to 30):
+ The input vision transformer encoding patch size.
+ num_channels (`int`, *optional*, defaults to 3):
+ The input image number of channels.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
+ The epsilon used by the layer normalization layers.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+ Whether to tie input and output embeddings.
+ rope_theta (`float`, *optional*, defaults to 25000.0):
+ The base period of the RoPE embeddings.
+ rope_scaling (`Dict`, *optional*):
+ Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+ strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
+ `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+ `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
+ these scaling strategies behave:
+ https://www.reddit.com/r/LocalFuyu/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
+ experimental feature, subject to breaking API changes in future versions.
+ qk_layernorm (`bool`, *optional*, defaults to `True`):
+ Whether or not to normalize the Queries and Keys after projecting the hidden states
+ hidden_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio after applying the MLP to the hidden states.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio after computing the attention scores.
+ partial_rotary_factor (`float`, *optional*, defaults to 0.5):
+ Percentage of the query and keys which will have rotary embedding.
+
+ pad_token_id (`int`, *optional*):
+ The id of the *padding* token.
+ bos_token_id (`int`, *optional*, defaults to 1):
+ The id of the *beginning-of-sequence* token.
+ eos_token_id (`Union[int, List[int]]`, *optional*, defaults to 2):
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+ text_config (`dict`, *optional*):
+ Dictionary of configuration options used to initialize the language model backbone.
+
+ ```python
+ >>> from transformers import FuyuConfig
+
+ >>> # Initializing a Fuyu fuyu-8b style configuration
+ >>> configuration = FuyuConfig()
+ ```"""
+
+ model_type = "fuyu"
+ keys_to_ignore_at_inference = ["past_key_values"]
+
+ def __init__(
+ self,
+ vocab_size=262144,
+ hidden_size=4096,
+ intermediate_size=16384,
+ num_hidden_layers=36,
+ num_attention_heads=64,
+ hidden_act="relu2",
+ max_position_embeddings=16384,
+ image_size=300,
+ patch_size=30,
+ num_channels=3,
+ initializer_range=0.02,
+ layer_norm_eps=1e-5,
+ use_cache=True,
+ tie_word_embeddings=False,
+ rope_theta=25000.0,
+ rope_scaling=None,
+ qk_layernorm=True,
+ hidden_dropout=0.0,
+ attention_dropout=0.0,
+ partial_rotary_factor=0.5,
+ pad_token_id=None,
+ bos_token_id=1,
+ eos_token_id=2,
+ text_config=None,
+ **kwargs,
+ ):
+ if text_config is None:
+ text_config = {
+ "vocab_size": vocab_size,
+ "max_position_embeddings": max_position_embeddings,
+ "hidden_size": hidden_size,
+ "intermediate_size": intermediate_size,
+ "num_hidden_layers": num_hidden_layers,
+ "num_attention_heads": num_attention_heads,
+ "hidden_act": hidden_act,
+ "initializer_range": initializer_range,
+ "layer_norm_eps": layer_norm_eps,
+ "use_cache": use_cache,
+ "rope_theta": rope_theta,
+ "rope_scaling": rope_scaling,
+ "qk_layernorm": qk_layernorm,
+ "hidden_dropout": hidden_dropout,
+ "attention_dropout": attention_dropout,
+ "partial_rotary_factor": partial_rotary_factor,
+ "pad_token_id": pad_token_id,
+ "bos_token_id": bos_token_id,
+ "eos_token_id": eos_token_id,
+ "tie_word_embeddings": tie_word_embeddings,
+ }
+ logger.info("`text_config` is None. Initializing the text model with default values.")
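+ # When `text_config` carries no explicit `model_type`, the language backbone defaults to a
+ # Persimmon-style decoder config looked up in `CONFIG_MAPPING`.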
+ text_model_type = text_config["model_type"] if "model_type" in text_config else "persimmon"
+ self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
+
+ self._vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.use_cache = use_cache
+ self.rope_theta = rope_theta
+ self.rope_scaling = rope_scaling
+ self.qk_layernorm = qk_layernorm
+ self.hidden_dropout = hidden_dropout
+ self.attention_dropout = attention_dropout
+ self.partial_rotary_factor = partial_rotary_factor
+ self._rope_scaling_validation()
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ tie_word_embeddings=tie_word_embeddings,
+ **kwargs,
+ )
+
+ def _rope_scaling_validation(self):
+ """
+ Validate the `rope_scaling` configuration.
+ """
+ if self.rope_scaling is None:
+ return
+
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+ raise ValueError(
+ "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
+ )
+ rope_scaling_type = self.rope_scaling.get("type", None)
+ rope_scaling_factor = self.rope_scaling.get("factor", None)
+ if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+ raise ValueError(
+ f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+ )
+ if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+ raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
+
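+# Configuration sketch (illustrative only): enabling linear RoPE scaling on the default Persimmon-style text
+# backbone. The scaling factor below is an arbitrary example, not a recommended value:
+#
+#     from transformers import FuyuConfig
+#
+#     config = FuyuConfig(rope_scaling={"type": "linear", "factor": 2.0})
+#     print(config.text_config.model_type)  # expected to be "persimmon" by default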
+
+__all__ = ["FuyuConfig"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/longformer/__init__.py b/janus/lib/python3.10/site-packages/transformers/models/longformer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..87f53105424b76e5c18bd740ecfdd37a5b29d0d4
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/longformer/__init__.py
@@ -0,0 +1,30 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+ from .configuration_longformer import *
+ from .modeling_longformer import *
+ from .modeling_tf_longformer import *
+ from .tokenization_longformer import *
+ from .tokenization_longformer_fast import *
+else:
+ import sys
+
+ _file = globals()["__file__"]
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/janus/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/modeling_longformer.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/modeling_longformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3fd5dc0a9ee0dfec6a44d44f0f88b5343161268e
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/modeling_longformer.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/longformer/modeling_tf_longformer.py b/janus/lib/python3.10/site-packages/transformers/models/longformer/modeling_tf_longformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f52ca658a7b6f97b15220e4072405ec8c5abe70
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/longformer/modeling_tf_longformer.py
@@ -0,0 +1,2786 @@
+# coding=utf-8
+# Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tensorflow Longformer model."""
+
+from __future__ import annotations
+
+import warnings
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_utils import (
+ TFMaskedLanguageModelingLoss,
+ TFModelInputType,
+ TFMultipleChoiceLoss,
+ TFPreTrainedModel,
+ TFQuestionAnsweringLoss,
+ TFSequenceClassificationLoss,
+ TFTokenClassificationLoss,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+from .configuration_longformer import LongformerConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "allenai/longformer-base-4096"
+_CONFIG_FOR_DOC = "LongformerConfig"
+
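+# Large negative bias added to masked attention scores so that masked positions receive ~0 weight after the
+# softmax.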
+LARGE_NEGATIVE = -1e8
+
+
+@dataclass
+class TFLongformerBaseModelOutput(ModelOutput):
+ """
+ Base class for Longformer's outputs, with potential hidden states, local and global attentions.
+
+ Args:
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
+ attention_window + 1)`, where `x` is the number of tokens with global attention mask.
+
+ Local attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token in the sequence to every token with
+ global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
+ remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
+ token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
+ (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
+ If the attention window contains a token with global attention, the attention weight at the corresponding
+ index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
+ attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
+ accessed from `global_attentions`.
+ global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x`
+ is the number of tokens with global attention mask.
+
+ Global attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token with global attention to every token
+ in the sequence.
+ """
+
+ last_hidden_state: tf.Tensor = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+ global_attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+@dataclass
+class TFLongformerBaseModelOutputWithPooling(ModelOutput):
+ """
+ Base class for Longformer's outputs that also contains a pooling of the last hidden states.
+
+ Args:
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ pooler_output (`tf.Tensor` of shape `(batch_size, hidden_size)`):
+ Last layer hidden-state of the first token of the sequence (classification token) further processed by a
+ Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence
+ prediction (classification) objective during pretraining.
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
+ attention_window + 1)`, where `x` is the number of tokens with global attention mask.
+
+ Local attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token in the sequence to every token with
+ global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
+ remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
+ token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
+ (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
+ If the attention window contains a token with global attention, the attention weight at the corresponding
+ index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
+ attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
+ accessed from `global_attentions`.
+ global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x`
+ is the number of tokens with global attention mask.
+
+ Global attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token with global attention to every token
+ in the sequence.
+ """
+
+ last_hidden_state: tf.Tensor = None
+ pooler_output: tf.Tensor = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+ global_attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+@dataclass
+class TFLongformerMaskedLMOutput(ModelOutput):
+ """
+ Base class for masked language models outputs.
+
+ Args:
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Masked language modeling (MLM) loss.
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
+ attention_window + 1)`, where `x` is the number of tokens with global attention mask.
+
+ Local attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token in the sequence to every token with
+ global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
+ remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
+ token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
+ (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
+ If the attention window contains a token with global attention, the attention weight at the corresponding
+ index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
+ attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
+ accessed from `global_attentions`.
+ global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x`
+ is the number of tokens with global attention mask.
+
+ Global attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token with global attention to every token
+ in the sequence.
+ """
+
+ loss: tf.Tensor | None = None
+ logits: tf.Tensor = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+ global_attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+@dataclass
+class TFLongformerQuestionAnsweringModelOutput(ModelOutput):
+ """
+ Base class for outputs of question answering Longformer models.
+
+ Args:
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
+ start_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Span-start scores (before SoftMax).
+ end_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Span-end scores (before SoftMax).
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
+ attention_window + 1)`, where `x` is the number of tokens with global attention mask.
+
+ Local attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token in the sequence to every token with
+ global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
+ remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
+ token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
+ (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
+ If the attention window contains a token with global attention, the attention weight at the corresponding
+ index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
+ attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
+ accessed from `global_attentions`.
+ global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x`
+ is the number of tokens with global attention mask.
+
+ Global attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token with global attention to every token
+ in the sequence.
+ """
+
+ loss: tf.Tensor | None = None
+ start_logits: tf.Tensor = None
+ end_logits: tf.Tensor = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+ global_attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+@dataclass
+class TFLongformerSequenceClassifierOutput(ModelOutput):
+ """
+ Base class for outputs of sentence classification models.
+
+ Args:
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
+ attention_window + 1)`, where `x` is the number of tokens with global attention mask.
+
+ Local attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token in the sequence to every token with
+ global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
+ remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
+ token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
+ (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
+ If the attention window contains a token with global attention, the attention weight at the corresponding
+ index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
+ attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
+ accessed from `global_attentions`.
+ global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x`
+ is the number of tokens with global attention mask.
+
+ Global attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token with global attention to every token
+ in the sequence.
+ """
+
+ loss: tf.Tensor | None = None
+ logits: tf.Tensor = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+ global_attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+@dataclass
+class TFLongformerMultipleChoiceModelOutput(ModelOutput):
+ """
+ Base class for outputs of multiple choice models.
+
+ Args:
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`tf.Tensor` of shape `(batch_size, num_choices)`):
+ *num_choices* is the second dimension of the input tensors. (see *input_ids* above).
+
+ Classification scores (before SoftMax).
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
+ attention_window + 1)`, where `x` is the number of tokens with global attention mask.
+
+ Local attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token in the sequence to every token with
+ global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
+ remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
+ token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
+ (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
+ If the attention window contains a token with global attention, the attention weight at the corresponding
+ index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
+ attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
+ accessed from `global_attentions`.
+ global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x`
+ is the number of tokens with global attention mask.
+
+ Global attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token with global attention to every token
+ in the sequence.
+ """
+
+ loss: tf.Tensor | None = None
+ logits: tf.Tensor = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+ global_attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+@dataclass
+class TFLongformerTokenClassifierOutput(ModelOutput):
+ """
+ Base class for outputs of token classification models.
+
+ Args:
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.num_labels)`):
+ Classification scores (before SoftMax).
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
+ attention_window + 1)`, where `x` is the number of tokens with global attention mask.
+
+ Local attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token in the sequence to every token with
+ global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
+ remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
+ token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
+ (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
+ If the attention window contains a token with global attention, the attention weight at the corresponding
+ index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
+ attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
+ accessed from `global_attentions`.
+ global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x`
+ is the number of tokens with global attention mask.
+
+ Global attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token with global attention to every token
+ in the sequence.
+ """
+
+ loss: tf.Tensor | None = None
+ logits: tf.Tensor = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+ global_attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+def _compute_global_attention_mask(input_ids_shape, sep_token_indices, before_sep_token=True):
+ """
+ Computes global attention mask by putting attention on all tokens before `sep_token_id` if `before_sep_token is
+ True` else after `sep_token_id`.
+ """
+ assert shape_list(sep_token_indices)[1] == 2, "`sep_token_indices` should have two columns (batch index, token index)"
+ question_end_index = tf.reshape(sep_token_indices, (input_ids_shape[0], 3, 2))[:, 0, 1][:, None]
+ # bool attention mask with True in locations of global attention
+ attention_mask = tf.expand_dims(tf.range(input_ids_shape[1], dtype=tf.int64), axis=0)
+ attention_mask = tf.tile(attention_mask, (input_ids_shape[0], 1))
+ if before_sep_token is True:
+ question_end_index = tf.tile(question_end_index, (1, input_ids_shape[1]))
+ attention_mask = tf.cast(attention_mask < question_end_index, dtype=question_end_index.dtype)
+ else:
+ # the last token is a separator and should not be counted, and there are two separator tokens in the middle
+ question_end_index = tf.tile(question_end_index + 1, (1, input_ids_shape[1]))
+ attention_mask = tf.cast(
+ attention_mask > question_end_index,
+ dtype=question_end_index.dtype,
+ ) * tf.cast(attention_mask < input_ids_shape[-1], dtype=question_end_index.dtype)
+
+ return attention_mask
+
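+# A worked toy case for `_compute_global_attention_mask` (illustrative only): for a QA-style input
+# `<s> q1 q2 </s> </s> c1 c2 </s>` the separator tokens sit at positions 3, 4 and 7, so `question_end_index`
+# is 3 and, with `before_sep_token=True`, the returned mask is `[1, 1, 1, 0, 0, 0, 0, 0]`: global attention
+# is placed on every question token and on none of the context tokens.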
+
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaLMHead with Roberta->Longformer
+class TFLongformerLMHead(keras.layers.Layer):
+ """Longformer Head for masked language modeling."""
+
+ def __init__(self, config, input_embeddings, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.dense = keras.layers.Dense(
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
+ self.act = get_tf_activation("gelu")
+
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.decoder = input_embeddings
+
+ def build(self, input_shape=None):
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.hidden_size])
+
+ def get_output_embeddings(self):
+ return self.decoder
+
+ def set_output_embeddings(self, value):
+ self.decoder.weight = value
+ self.decoder.vocab_size = shape_list(value)[0]
+
+ def get_bias(self):
+ return {"bias": self.bias}
+
+ def set_bias(self, value):
+ self.bias = value["bias"]
+ self.config.vocab_size = shape_list(value["bias"])[0]
+
+ def call(self, hidden_states):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.layer_norm(hidden_states)
+
+ # project back to size of vocabulary with bias
+ seq_length = shape_list(tensor=hidden_states)[1]
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
+ hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True)
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
+
+ return hidden_states
+
+
+class TFLongformerEmbeddings(keras.layers.Layer):
+ """
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing and some extra casting.
+ """
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.padding_idx = 1
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.max_position_embeddings = config.max_position_embeddings
+ self.initializer_range = config.initializer_range
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+
+ def build(self, input_shape=None):
+ with tf.name_scope("word_embeddings"):
+ self.weight = self.add_weight(
+ name="weight",
+ shape=[self.config.vocab_size, self.hidden_size],
+ initializer=get_initializer(self.initializer_range),
+ )
+
+ with tf.name_scope("token_type_embeddings"):
+ self.token_type_embeddings = self.add_weight(
+ name="embeddings",
+ shape=[self.config.type_vocab_size, self.hidden_size],
+ initializer=get_initializer(self.initializer_range),
+ )
+
+ with tf.name_scope("position_embeddings"):
+ self.position_embeddings = self.add_weight(
+ name="embeddings",
+ shape=[self.max_position_embeddings, self.hidden_size],
+ initializer=get_initializer(self.initializer_range),
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+
+ def create_position_ids_from_input_ids(self, input_ids, past_key_values_length=0):
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
+ symbols are ignored. This is modified from fairseq's `utils.make_positions`.
+
+ Args:
+ input_ids: tf.Tensor
+ Returns: tf.Tensor
+ """
+ mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype)
+ incremental_indices = (tf.math.cumsum(mask, axis=1) + past_key_values_length) * mask
+
+ return incremental_indices + self.padding_idx
+
+ def call(
+ self,
+ input_ids=None,
+ position_ids=None,
+ token_type_ids=None,
+ inputs_embeds=None,
+ past_key_values_length=0,
+ training=False,
+ ):
+ """
+ Applies embedding based on inputs tensor.
+
+ Returns:
+ final_embeddings (`tf.Tensor`): output embedding tensor.
+ """
+ assert not (input_ids is None and inputs_embeds is None)
+
+ if input_ids is not None:
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
+
+ input_shape = shape_list(inputs_embeds)[:-1]
+
+ if token_type_ids is None:
+ token_type_ids = tf.cast(tf.fill(dims=input_shape, value=0), tf.int64)
+
+ if position_ids is None:
+ if input_ids is not None:
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
+ position_ids = self.create_position_ids_from_input_ids(
+ input_ids=input_ids, past_key_values_length=past_key_values_length
+ )
+ else:
+ position_ids = tf.expand_dims(
+ tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1, dtype=tf.int64),
+ axis=0,
+ )
+
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
+
+ return final_embeddings
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->Longformer
+class TFLongformerIntermediate(keras.layers.Layer):
+ def __init__(self, config: LongformerConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
+ else:
+ self.intermediate_act_fn = config.hidden_act
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->Longformer
+class TFLongformerOutput(keras.layers.Layer):
+ def __init__(self, config: LongformerConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.intermediate_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->Longformer
+class TFLongformerPooler(keras.layers.Layer):
+ def __init__(self, config: LongformerConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ activation="tanh",
+ name="dense",
+ )
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(inputs=first_token_tensor)
+
+ return pooled_output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->Longformer
+class TFLongformerSelfOutput(keras.layers.Layer):
+ def __init__(self, config: LongformerConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+
+
+class TFLongformerSelfAttention(keras.layers.Layer):
+ def __init__(self, config, layer_id, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+
+ if config.hidden_size % config.num_attention_heads != 0:
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads}"
+ )
+
+ self.num_heads = config.num_attention_heads
+ self.head_dim = int(config.hidden_size / config.num_attention_heads)
+ self.embed_dim = config.hidden_size
+ self.query = keras.layers.Dense(
+ self.embed_dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="query",
+ )
+ self.key = keras.layers.Dense(
+ self.embed_dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="key",
+ )
+ self.value = keras.layers.Dense(
+ self.embed_dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="value",
+ )
+
+ # separate projection layers for tokens with global attention
+ self.query_global = keras.layers.Dense(
+ self.embed_dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="query_global",
+ )
+ self.key_global = keras.layers.Dense(
+ self.embed_dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="key_global",
+ )
+ self.value_global = keras.layers.Dense(
+ self.embed_dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="value_global",
+ )
+ self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
+ self.global_dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
+ self.layer_id = layer_id
+ attention_window = config.attention_window[self.layer_id]
+
+ assert (
+ attention_window % 2 == 0
+ ), f"`attention_window` for layer {self.layer_id} has to be an even value. Given {attention_window}"
+ assert (
+ attention_window > 0
+ ), f"`attention_window` for layer {self.layer_id} has to be positive. Given {attention_window}"
+
+ self.one_sided_attn_window_size = attention_window // 2
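+ # For example (illustrative, assuming the default longformer-base-4096 configuration where
+ # attention_window = 512): one_sided_attn_window_size = 256, i.e. every token attends to 256
+ # tokens on each side in the local attention pattern.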
+
+ def build(self, input_shape=None):
+ if not self.built:
+ with tf.name_scope("query_global"):
+ self.query_global.build((self.config.hidden_size,))
+ with tf.name_scope("key_global"):
+ self.key_global.build((self.config.hidden_size,))
+ with tf.name_scope("value_global"):
+ self.value_global.build((self.config.hidden_size,))
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "query", None) is not None:
+ with tf.name_scope(self.query.name):
+ self.query.build([None, None, self.config.hidden_size])
+ if getattr(self, "key", None) is not None:
+ with tf.name_scope(self.key.name):
+ self.key.build([None, None, self.config.hidden_size])
+ if getattr(self, "value", None) is not None:
+ with tf.name_scope(self.value.name):
+ self.value.build([None, None, self.config.hidden_size])
+ if getattr(self, "query_global", None) is not None:
+ with tf.name_scope(self.query_global.name):
+ self.query_global.build([None, None, self.config.hidden_size])
+ if getattr(self, "key_global", None) is not None:
+ with tf.name_scope(self.key_global.name):
+ self.key_global.build([None, None, self.config.hidden_size])
+ if getattr(self, "value_global", None) is not None:
+ with tf.name_scope(self.value_global.name):
+ self.value_global.build([None, None, self.config.hidden_size])
+
+ def call(
+ self,
+ inputs,
+ training=False,
+ ):
+ """
+ TFLongformerSelfAttention expects *len(hidden_states)* to be a multiple of *attention_window*. Padding to
+ *attention_window* happens in `TFLongformerMainLayer.call` to avoid redoing the padding on each layer.
+
+ The *attention_mask* is changed in `TFLongformerMainLayer.call` from 0, 1, 2 to:
+
+ - -10000: no attention
+ - 0: local attention
+ - +10000: global attention
+ """
+ # retrieve input args
+ (
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ is_index_masked,
+ is_index_global_attn,
+ is_global_attn,
+ ) = inputs
+
+ # project hidden states
+ query_vectors = self.query(hidden_states)
+ key_vectors = self.key(hidden_states)
+ value_vectors = self.value(hidden_states)
+ batch_size, seq_len, embed_dim = shape_list(hidden_states)
+
+ tf.debugging.assert_equal(
+ embed_dim,
+ self.embed_dim,
+ message=f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}",
+ )
+
+ # normalize query
+ query_vectors /= tf.math.sqrt(tf.cast(self.head_dim, dtype=query_vectors.dtype))
+ query_vectors = tf.reshape(query_vectors, (batch_size, seq_len, self.num_heads, self.head_dim))
+ key_vectors = tf.reshape(key_vectors, (batch_size, seq_len, self.num_heads, self.head_dim))
+
+ # attn_probs = (batch_size, seq_len, num_heads, window*2+1)
+ attn_scores = self._sliding_chunks_query_key_matmul(
+ query_vectors, key_vectors, self.one_sided_attn_window_size
+ )
+
+ # values to pad for attention probs
+ remove_from_windowed_attention_mask = attention_mask != 0
+ # cast to fp32/fp16 then replace 1's with -inf
+ float_mask = tf.cast(remove_from_windowed_attention_mask, dtype=query_vectors.dtype) * LARGE_NEGATIVE
+
+ # diagonal mask with zeros everywhere and -inf inplace of padding
+ diagonal_mask = self._sliding_chunks_query_key_matmul(
+ tf.ones(shape_list(attention_mask)),
+ float_mask,
+ self.one_sided_attn_window_size,
+ )
+
+ # pad local attention probs
+ attn_scores += diagonal_mask
+
+ tf.debugging.assert_equal(
+ shape_list(attn_scores),
+ [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1],
+ message=(
+ f"attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads},"
+ f" {self.one_sided_attn_window_size * 2 + 1}), but is of size {shape_list(attn_scores)}"
+ ),
+ )
+
+ # compute global attn indices required through out forward fn
+ (
+ max_num_global_attn_indices,
+ is_index_global_attn_nonzero,
+ is_local_index_global_attn_nonzero,
+ is_local_index_no_global_attn_nonzero,
+ ) = self._get_global_attn_indices(is_index_global_attn)
+
+ # this function is only relevant for global attention
+ if is_global_attn:
+ attn_scores = self._concat_with_global_key_attn_probs(
+ attn_scores=attn_scores,
+ query_vectors=query_vectors,
+ key_vectors=key_vectors,
+ max_num_global_attn_indices=max_num_global_attn_indices,
+ is_index_global_attn_nonzero=is_index_global_attn_nonzero,
+ is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
+ is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
+ )
+
+ attn_probs = stable_softmax(attn_scores, axis=-1)
+
+ # softmax sometimes inserts NaN if all positions are masked, replace them with 0
+ # Make sure to create a mask with the proper shape:
+ # if is_global_attn==True => [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + max_num_global_attn_indices + 1]
+ # if is_global_attn==False => [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1]
+ if is_global_attn:
+ masked_index = tf.tile(
+ is_index_masked[:, :, None, None],
+ (1, 1, self.num_heads, self.one_sided_attn_window_size * 2 + max_num_global_attn_indices + 1),
+ )
+ else:
+ masked_index = tf.tile(
+ is_index_masked[:, :, None, None],
+ (1, 1, self.num_heads, self.one_sided_attn_window_size * 2 + 1),
+ )
+ attn_probs = tf.where(
+ masked_index,
+ tf.zeros(shape_list(masked_index), dtype=attn_probs.dtype),
+ attn_probs,
+ )
+
+ if layer_head_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(layer_head_mask),
+ [self.num_heads],
+ message=(
+ f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
+ f" {shape_list(layer_head_mask)}"
+ ),
+ )
+
+ attn_probs = tf.reshape(layer_head_mask, (1, 1, -1, 1)) * attn_probs
+
+ # apply dropout
+ attn_probs = self.dropout(attn_probs, training=training)
+ value_vectors = tf.reshape(value_vectors, (batch_size, seq_len, self.num_heads, self.head_dim))
+
+ # if global attention, compute sum of global and local attn
+
+ if is_global_attn:
+ attn_output = self._compute_attn_output_with_global_indices(
+ value_vectors=value_vectors,
+ attn_probs=attn_probs,
+ max_num_global_attn_indices=max_num_global_attn_indices,
+ is_index_global_attn_nonzero=is_index_global_attn_nonzero,
+ is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
+ )
+ else:
+ attn_output = self._sliding_chunks_matmul_attn_probs_value(
+ attn_probs, value_vectors, self.one_sided_attn_window_size
+ )
+
+ tf.debugging.assert_equal(
+ shape_list(attn_output), [batch_size, seq_len, self.num_heads, self.head_dim], message="Unexpected size"
+ )
+
+ attn_output = tf.reshape(attn_output, (batch_size, seq_len, embed_dim))
+
+ # compute value for global attention and overwrite to attention output
+ if is_global_attn:
+ attn_output, global_attn_probs = self._compute_global_attn_output_from_hidden(
+ attn_output=attn_output,
+ hidden_states=hidden_states,
+ max_num_global_attn_indices=max_num_global_attn_indices,
+ layer_head_mask=layer_head_mask,
+ is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
+ is_index_global_attn_nonzero=is_index_global_attn_nonzero,
+ is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
+ is_index_masked=is_index_masked,
+ training=training,
+ )
+ else:
+ # Leave attn_output unchanged
+ global_attn_probs = tf.zeros((batch_size, self.num_heads, max_num_global_attn_indices, seq_len))
+
+ # make sure that local attention probabilities are set to 0 for indices of global attn
+ # Make sure to create a mask with the proper shape:
+ # if is_global_attn==True => [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + max_num_global_attn_indices + 1]
+ # if is_global_attn==False => [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1]
+ if is_global_attn:
+ masked_global_attn_index = tf.tile(
+ is_index_global_attn[:, :, None, None],
+ (1, 1, self.num_heads, self.one_sided_attn_window_size * 2 + max_num_global_attn_indices + 1),
+ )
+ else:
+ masked_global_attn_index = tf.tile(
+ is_index_global_attn[:, :, None, None],
+ (1, 1, self.num_heads, self.one_sided_attn_window_size * 2 + 1),
+ )
+ attn_probs = tf.where(
+ masked_global_attn_index,
+ tf.zeros(shape_list(masked_global_attn_index), dtype=attn_probs.dtype),
+ attn_probs,
+ )
+
+ outputs = (attn_output, attn_probs, global_attn_probs)
+
+ return outputs
+
+ def _sliding_chunks_query_key_matmul(self, query, key, window_overlap):
+ """
+ Matrix multiplication of query and key tensors using a sliding window attention pattern. This
+ implementation splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained Longformer) with an
+ overlap of size window_overlap.
+ """
+ batch_size, seq_len, num_heads, head_dim = shape_list(query)
+
+ tf.debugging.assert_equal(
+ seq_len % (window_overlap * 2),
+ 0,
+ message=f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}",
+ )
+ tf.debugging.assert_equal(
+ shape_list(query),
+ shape_list(key),
+ message=(
+ f"Shape of query and key should be equal, but got query: {shape_list(query)} and key:"
+ f" {shape_list(key)}"
+ ),
+ )
+
+ chunks_count = seq_len // window_overlap - 1
+
+ # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2
+ query = tf.reshape(
+ tf.transpose(query, (0, 2, 1, 3)),
+ (batch_size * num_heads, seq_len, head_dim),
+ )
+ key = tf.reshape(tf.transpose(key, (0, 2, 1, 3)), (batch_size * num_heads, seq_len, head_dim))
+ chunked_query = self._chunk(query, window_overlap)
+ chunked_key = self._chunk(key, window_overlap)
+
+ # matrix multiplication
+ # bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim
+ # bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim
+ # bcxy: batch_size * num_heads x chunks x 2window_overlap x 2window_overlap
+ chunked_query = tf.cast(chunked_query, dtype=chunked_key.dtype)
+ chunked_attention_scores = tf.einsum("bcxd,bcyd->bcxy", chunked_query, chunked_key) # multiply
+
+ # convert diagonals into columns
+ paddings = tf.convert_to_tensor([[0, 0], [0, 0], [0, 1], [0, 0]])
+ diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims(chunked_attention_scores, paddings)
+
+ # allocate space for the overall attention matrix where the chunks are combined. The last dimension
+ # has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the window_overlap lower triangles (attention from a word to
+ # window_overlap previous words). The following column is attention score from each word to itself, then
+ # followed by window_overlap columns for the upper triangle.
+
+ # copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions
+ # - copying the main diagonal and the upper triangle
+ # TODO: This code is most likely not very efficient and should be improved
+ diagonal_attn_scores_up_triang = tf.concat(
+ [
+ diagonal_chunked_attention_scores[:, :, :window_overlap, : window_overlap + 1],
+ diagonal_chunked_attention_scores[:, -1:, window_overlap:, : window_overlap + 1],
+ ],
+ axis=1,
+ )
+
+ # - copying the lower triangle
+ diagonal_attn_scores_low_triang = tf.concat(
+ [
+ tf.zeros(
+ (batch_size * num_heads, 1, window_overlap, window_overlap),
+ dtype=diagonal_chunked_attention_scores.dtype,
+ ),
+ diagonal_chunked_attention_scores[:, :, -(window_overlap + 1) : -1, window_overlap + 1 :],
+ ],
+ axis=1,
+ )
+ diagonal_attn_scores_first_chunk = tf.concat(
+ [
+ tf.roll(
+ diagonal_chunked_attention_scores,
+ shift=[1, window_overlap],
+ axis=[2, 3],
+ )[:, :, :window_overlap, :window_overlap],
+ tf.zeros(
+ (batch_size * num_heads, 1, window_overlap, window_overlap),
+ dtype=diagonal_chunked_attention_scores.dtype,
+ ),
+ ],
+ axis=1,
+ )
+ first_chunk_mask = (
+ tf.tile(
+ tf.range(chunks_count + 1, dtype=tf.int64)[None, :, None, None],
+ (batch_size * num_heads, 1, window_overlap, window_overlap),
+ )
+ < 1
+ )
+ diagonal_attn_scores_low_triang = tf.where(
+ first_chunk_mask,
+ diagonal_attn_scores_first_chunk,
+ diagonal_attn_scores_low_triang,
+ )
+
+ # merging upper and lower triangle
+ diagonal_attention_scores = tf.concat(
+ [diagonal_attn_scores_low_triang, diagonal_attn_scores_up_triang], axis=-1
+ )
+
+ # separate batch_size and num_heads dimensions again
+ diagonal_attention_scores = tf.transpose(
+ tf.reshape(
+ diagonal_attention_scores,
+ (batch_size, num_heads, seq_len, 2 * window_overlap + 1),
+ ),
+ (0, 2, 1, 3),
+ )
+
+ diagonal_attention_scores = self._mask_invalid_locations(diagonal_attention_scores, window_overlap)
+
+ return diagonal_attention_scores
+
+ @staticmethod
+ def _mask_invalid_locations(input_tensor, window_overlap):
+ # create correct upper triangle bool mask
+ mask_2d_upper = tf.reverse(
+ tf.linalg.band_part(tf.ones(shape=(window_overlap, window_overlap + 1)), -1, 0),
+ axis=[0],
+ )
+
+ # pad to full matrix
+ padding = tf.convert_to_tensor(
+ [[0, shape_list(input_tensor)[1] - window_overlap], [0, shape_list(input_tensor)[3] - window_overlap - 1]]
+ )
+
+ # create lower mask
+ mask_2d = tf.pad(mask_2d_upper, padding)
+
+ # combine with upper mask
+ mask_2d = mask_2d + tf.reverse(mask_2d, axis=[0, 1])
+
+ # broadcast to full matrix
+ mask_4d = tf.tile(mask_2d[None, :, None, :], (shape_list(input_tensor)[0], 1, 1, 1))
+
+ # inf tensor used for masking
+ inf_tensor = -float("inf") * tf.ones_like(input_tensor)
+
+ # mask
+ input_tensor = tf.where(tf.math.greater(mask_4d, 0), inf_tensor, input_tensor)
+
+ return input_tensor
+
+ def _sliding_chunks_matmul_attn_probs_value(self, attn_probs, value, window_overlap):
+ """
+ Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. Returned tensor will be of the
+ same shape as `attn_probs`
+ """
+
+ batch_size, seq_len, num_heads, head_dim = shape_list(value)
+
+ tf.debugging.assert_equal(
+ seq_len % (window_overlap * 2), 0, message="seq_len has to be a multiple of 2 * window_overlap"
+ )
+ tf.debugging.assert_equal(
+ shape_list(attn_probs)[:3],
+ shape_list(value)[:3],
+ message="value and attn_probs must have same dims (except head_dim)",
+ )
+ tf.debugging.assert_equal(
+ shape_list(attn_probs)[3],
+ 2 * window_overlap + 1,
+ message="attn_probs last dim has to be 2 * window_overlap + 1",
+ )
+
+ chunks_count = seq_len // window_overlap - 1
+
+ # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap
+ chunked_attn_probs = tf.reshape(
+ tf.transpose(attn_probs, (0, 2, 1, 3)),
+ (
+ batch_size * num_heads,
+ seq_len // window_overlap,
+ window_overlap,
+ 2 * window_overlap + 1,
+ ),
+ )
+
+ # group batch_size and num_heads dimensions into one
+ value = tf.reshape(
+ tf.transpose(value, (0, 2, 1, 3)),
+ (batch_size * num_heads, seq_len, head_dim),
+ )
+
+ # pad seq_len with window_overlap values at the beginning of the sequence and another window_overlap at the end
+ paddings = tf.convert_to_tensor([[0, 0], [window_overlap, window_overlap], [0, 0]])
+ padded_value = tf.pad(value, paddings, constant_values=-1)
+
+ # chunk padded_value into chunks of size 3 window overlap and an overlap of size window overlap
+ frame_size = 3 * window_overlap * head_dim
+ frame_hop_size = (shape_list(padded_value)[1] * head_dim - frame_size) // chunks_count
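+ # Illustrative sanity check (hypothetical numbers): for seq_len = 512, window_overlap = 256 and
+ # head_dim = 64, padded_value spans 1024 positions, frame_size = 3 * 256 * 64 = 49152 and
+ # chunks_count = 1, so frame_hop_size = (1024 * 64 - 49152) // 1 = 16384 = 256 * 64, i.e.
+ # consecutive frames overlap by 2 * window_overlap positions.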
+ chunked_value = tf.signal.frame(
+ tf.reshape(padded_value, (batch_size * num_heads, -1)),
+ frame_size,
+ frame_hop_size,
+ )
+ chunked_value = tf.reshape(
+ chunked_value,
+ (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim),
+ )
+
+ tf.debugging.assert_equal(
+ shape_list(chunked_value),
+ [batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim],
+ message="Chunked value has the wrong shape",
+ )
+
+ chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs)
+ context = tf.einsum("bcwd,bcdh->bcwh", chunked_attn_probs, chunked_value)
+ context = tf.transpose(
+ tf.reshape(context, (batch_size, num_heads, seq_len, head_dim)),
+ (0, 2, 1, 3),
+ )
+
+ return context
+
+ @staticmethod
+ def _pad_and_transpose_last_two_dims(hidden_states_padded, paddings):
+ """pads rows and then flips rows and columns"""
+ hidden_states_padded = tf.pad(
+ hidden_states_padded, paddings
+ ) # padding value is not important because it will be overwritten
+ batch_size, chunk_size, seq_length, hidden_dim = shape_list(hidden_states_padded)
+ hidden_states_padded = tf.reshape(hidden_states_padded, (batch_size, chunk_size, hidden_dim, seq_length))
+
+ return hidden_states_padded
+
+ @staticmethod
+ def _pad_and_diagonalize(chunked_hidden_states):
+ """
+ shift every row 1 step right, converting columns into diagonals.
+
+ Example:
+
+ ```python
+ chunked_hidden_states: [
+ 0.4983,
+ 2.6918,
+ -0.0071,
+ 1.0492,
+ -1.8348,
+ 0.7672,
+ 0.2986,
+ 0.0285,
+ -0.7584,
+ 0.4206,
+ -0.0405,
+ 0.1599,
+ 2.0514,
+ -1.1600,
+ 0.5372,
+ 0.2629,
+ ]
+ window_overlap = num_rows = 4
+ ```
+
+ (pad & diagonalize) => [ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000
+ 0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000 0.0000, 0.0000, -0.7584, 0.4206,
+ -0.0405, 0.1599, 0.0000 0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ]
+ """
+ total_num_heads, num_chunks, window_overlap, hidden_dim = shape_list(chunked_hidden_states)
+ paddings = tf.convert_to_tensor([[0, 0], [0, 0], [0, 0], [0, window_overlap + 1]])
+ chunked_hidden_states = tf.pad(
+ chunked_hidden_states, paddings
+ ) # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1). Padding value is not important because it'll be overwritten
+ chunked_hidden_states = tf.reshape(
+ chunked_hidden_states, (total_num_heads, num_chunks, -1)
+ ) # total_num_heads x num_chunks x window_overlap * (hidden_dim + window_overlap + 1)
+ chunked_hidden_states = chunked_hidden_states[
+ :, :, :-window_overlap
+ ] # total_num_heads x num_chunks x window_overlap * (hidden_dim + window_overlap)
+ chunked_hidden_states = tf.reshape(
+ chunked_hidden_states,
+ (total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim),
+ ) # total_num_heads x num_chunks x window_overlap x (hidden_dim + window_overlap)
+ chunked_hidden_states = chunked_hidden_states[:, :, :, :-1]
+
+ return chunked_hidden_states
+
+ @staticmethod
+ def _chunk(hidden_states, window_overlap):
+ """convert into overlapping chunks. Chunk size = 2w, overlap size = w"""
+ batch_size, seq_length, hidden_dim = shape_list(hidden_states)
+ num_output_chunks = 2 * (seq_length // (2 * window_overlap)) - 1
+
+ # define frame size and frame stride (similar to convolution)
+ frame_hop_size = window_overlap * hidden_dim
+ frame_size = 2 * frame_hop_size
+ hidden_states = tf.reshape(hidden_states, (batch_size, seq_length * hidden_dim))
+
+ # chunk with overlap
+ chunked_hidden_states = tf.signal.frame(hidden_states, frame_size, frame_hop_size)
+
+ tf.debugging.assert_equal(
+ shape_list(chunked_hidden_states),
+ [batch_size, num_output_chunks, frame_size],
+ message=(
+ "Make sure chunking is correctly applied. `Chunked hidden states should have output dimension"
+ f" {[batch_size, frame_size, num_output_chunks]}, but got {shape_list(chunked_hidden_states)}."
+ ),
+ )
+
+ chunked_hidden_states = tf.reshape(
+ chunked_hidden_states,
+ (batch_size, num_output_chunks, 2 * window_overlap, hidden_dim),
+ )
+
+ return chunked_hidden_states
+
+ @staticmethod
+ def _get_global_attn_indices(is_index_global_attn):
+ """compute global attn indices required throughout forward pass"""
+ # helper variable
+ num_global_attn_indices = tf.math.count_nonzero(is_index_global_attn, axis=1)
+ num_global_attn_indices = tf.cast(num_global_attn_indices, dtype=tf.constant(1).dtype)
+
+ # max number of global attn indices in batch
+ max_num_global_attn_indices = tf.reduce_max(num_global_attn_indices)
+
+ # indices of global attn
+ is_index_global_attn_nonzero = tf.where(is_index_global_attn)
+
+ # helper variable
+ is_local_index_global_attn = tf.range(max_num_global_attn_indices) < tf.expand_dims(
+ num_global_attn_indices, axis=-1
+ )
+
+ # location of the non-padding values within global attention indices
+ is_local_index_global_attn_nonzero = tf.where(is_local_index_global_attn)
+
+ # location of the padding values within global attention indices
+ is_local_index_no_global_attn_nonzero = tf.where(tf.math.logical_not(is_local_index_global_attn))
+
+ return (
+ max_num_global_attn_indices,
+ is_index_global_attn_nonzero,
+ is_local_index_global_attn_nonzero,
+ is_local_index_no_global_attn_nonzero,
+ )
+
+ def _concat_with_global_key_attn_probs(
+ self,
+ attn_scores,
+ key_vectors,
+ query_vectors,
+ max_num_global_attn_indices,
+ is_index_global_attn_nonzero,
+ is_local_index_global_attn_nonzero,
+ is_local_index_no_global_attn_nonzero,
+ ):
+ batch_size = shape_list(key_vectors)[0]
+
+ # select global key vectors
+ global_key_vectors = tf.gather_nd(key_vectors, is_index_global_attn_nonzero)
+
+ # create only global key vectors
+ key_vectors_only_global = tf.scatter_nd(
+ is_local_index_global_attn_nonzero,
+ global_key_vectors,
+ shape=(
+ batch_size,
+ max_num_global_attn_indices,
+ self.num_heads,
+ self.head_dim,
+ ),
+ )
+
+ # (batch_size, seq_len, num_heads, max_num_global_attn_indices)
+ attn_probs_from_global_key = tf.einsum("blhd,bshd->blhs", query_vectors, key_vectors_only_global)
+
+ # (batch_size, max_num_global_attn_indices, seq_len, num_heads)
+ attn_probs_from_global_key_trans = tf.transpose(attn_probs_from_global_key, (0, 3, 1, 2))
+ mask_shape = (shape_list(is_local_index_no_global_attn_nonzero)[0],) + tuple(
+ shape_list(attn_probs_from_global_key_trans)[-2:]
+ )
+ mask = tf.ones(mask_shape) * -10000.0
+ mask = tf.cast(mask, dtype=attn_probs_from_global_key_trans.dtype)
+
+ # scatter mask
+ attn_probs_from_global_key_trans = tf.tensor_scatter_nd_update(
+ attn_probs_from_global_key_trans,
+ is_local_index_no_global_attn_nonzero,
+ mask,
+ )
+
+ # (batch_size, seq_len, num_heads, max_num_global_attn_indices)
+ attn_probs_from_global_key = tf.transpose(attn_probs_from_global_key_trans, (0, 2, 3, 1))
+
+ # concat to attn_probs
+ # (batch_size, seq_len, num_heads, extra attention count + 2*window+1)
+ attn_scores = tf.concat((attn_probs_from_global_key, attn_scores), axis=-1)
+
+ return attn_scores
+
+ def _compute_attn_output_with_global_indices(
+ self,
+ value_vectors,
+ attn_probs,
+ max_num_global_attn_indices,
+ is_index_global_attn_nonzero,
+ is_local_index_global_attn_nonzero,
+ ):
+ batch_size = shape_list(attn_probs)[0]
+
+ # cut local attn probs to global only
+ attn_probs_only_global = attn_probs[:, :, :, :max_num_global_attn_indices]
+
+ # select global value vectors
+ global_value_vectors = tf.gather_nd(value_vectors, is_index_global_attn_nonzero)
+
+ # create only global value vectors
+ value_vectors_only_global = tf.scatter_nd(
+ is_local_index_global_attn_nonzero,
+ global_value_vectors,
+ shape=(
+ batch_size,
+ max_num_global_attn_indices,
+ self.num_heads,
+ self.head_dim,
+ ),
+ )
+
+ # compute attn output only global
+ attn_output_only_global = tf.einsum("blhs,bshd->blhd", attn_probs_only_global, value_vectors_only_global)
+
+ # reshape attn probs
+ attn_probs_without_global = attn_probs[:, :, :, max_num_global_attn_indices:]
+
+ # compute attn output with global
+ attn_output_without_global = self._sliding_chunks_matmul_attn_probs_value(
+ attn_probs_without_global, value_vectors, self.one_sided_attn_window_size
+ )
+
+ return attn_output_only_global + attn_output_without_global
+
+ def _compute_global_attn_output_from_hidden(
+ self,
+ attn_output,
+ hidden_states,
+ max_num_global_attn_indices,
+ layer_head_mask,
+ is_local_index_global_attn_nonzero,
+ is_index_global_attn_nonzero,
+ is_local_index_no_global_attn_nonzero,
+ is_index_masked,
+ training,
+ ):
+ batch_size, seq_len = shape_list(hidden_states)[:2]
+
+ # prepare global hidden states
+ global_attn_hidden_states = tf.gather_nd(hidden_states, is_index_global_attn_nonzero)
+ global_attn_hidden_states = tf.scatter_nd(
+ is_local_index_global_attn_nonzero,
+ global_attn_hidden_states,
+ shape=(batch_size, max_num_global_attn_indices, self.embed_dim),
+ )
+
+ # global key, query, value
+ global_query_vectors_only_global = self.query_global(global_attn_hidden_states)
+ global_key_vectors = self.key_global(hidden_states)
+ global_value_vectors = self.value_global(hidden_states)
+
+ # normalize
+ global_query_vectors_only_global /= tf.math.sqrt(
+ tf.cast(self.head_dim, dtype=global_query_vectors_only_global.dtype)
+ )
+ global_query_vectors_only_global = self.reshape_and_transpose(global_query_vectors_only_global, batch_size)
+ global_key_vectors = self.reshape_and_transpose(global_key_vectors, batch_size)
+ global_value_vectors = self.reshape_and_transpose(global_value_vectors, batch_size)
+
+ # compute attn scores
+ global_attn_scores = tf.matmul(global_query_vectors_only_global, global_key_vectors, transpose_b=True)
+
+ tf.debugging.assert_equal(
+ shape_list(global_attn_scores),
+ [batch_size * self.num_heads, max_num_global_attn_indices, seq_len],
+ message=(
+ "global_attn_scores have the wrong size. Size should be"
+ f" {(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)}, but is"
+ f" {shape_list(global_attn_scores)}."
+ ),
+ )
+
+ global_attn_scores = tf.reshape(
+ global_attn_scores,
+ (batch_size, self.num_heads, max_num_global_attn_indices, seq_len),
+ )
+ global_attn_scores_trans = tf.transpose(global_attn_scores, (0, 2, 1, 3))
+ mask_shape = (shape_list(is_local_index_no_global_attn_nonzero)[0],) + tuple(
+ shape_list(global_attn_scores_trans)[-2:]
+ )
+ global_attn_mask = tf.ones(mask_shape) * -10000.0
+ global_attn_mask = tf.cast(global_attn_mask, dtype=global_attn_scores_trans.dtype)
+
+ # scatter mask
+ global_attn_scores_trans = tf.tensor_scatter_nd_update(
+ global_attn_scores_trans,
+ is_local_index_no_global_attn_nonzero,
+ global_attn_mask,
+ )
+ global_attn_scores = tf.transpose(global_attn_scores_trans, (0, 2, 1, 3))
+
+ # mask global attn scores
+ attn_mask = tf.tile(is_index_masked[:, None, None, :], (1, shape_list(global_attn_scores)[1], 1, 1))
+ global_attn_scores = tf.where(attn_mask, -10000.0, global_attn_scores)
+ global_attn_scores = tf.reshape(
+ global_attn_scores,
+ (batch_size * self.num_heads, max_num_global_attn_indices, seq_len),
+ )
+
+ # compute global attn probs
+ global_attn_probs_float = stable_softmax(global_attn_scores, axis=-1)
+
+ # apply layer head masking
+ if layer_head_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(layer_head_mask),
+ [self.num_heads],
+ message=(
+ f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
+ f" {shape_list(layer_head_mask)}"
+ ),
+ )
+ global_attn_probs_float = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
+ global_attn_probs_float, (batch_size, self.num_heads, max_num_global_attn_indices, seq_len)
+ )
+ global_attn_probs_float = tf.reshape(
+ global_attn_probs_float, (batch_size * self.num_heads, max_num_global_attn_indices, seq_len)
+ )
+
+ # dropout
+ global_attn_probs = self.global_dropout(global_attn_probs_float, training=training)
+
+ # global attn output
+ global_attn_output = tf.matmul(global_attn_probs, global_value_vectors)
+
+ tf.debugging.assert_equal(
+ shape_list(global_attn_output),
+ [batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim],
+ message=(
+ "global_attn_output tensor has the wrong size. Size should be"
+ f" {(batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim)}, but is"
+ f" {shape_list(global_attn_output)}."
+ ),
+ )
+
+ global_attn_output = tf.reshape(
+ global_attn_output,
+ (batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim),
+ )
+
+ # get only non zero global attn output
+ nonzero_global_attn_output = tf.gather_nd(
+ tf.transpose(global_attn_output, (0, 2, 1, 3)),
+ is_local_index_global_attn_nonzero,
+ )
+ nonzero_global_attn_output = tf.reshape(
+ nonzero_global_attn_output,
+ (shape_list(is_local_index_global_attn_nonzero)[0], -1),
+ )
+
+ # overwrite values with global attention
+ attn_output = tf.tensor_scatter_nd_update(
+ attn_output, is_index_global_attn_nonzero, nonzero_global_attn_output
+ )
+
+ global_attn_probs = tf.reshape(
+ global_attn_probs, (batch_size, self.num_heads, max_num_global_attn_indices, seq_len)
+ )
+
+ return attn_output, global_attn_probs
+
+ def reshape_and_transpose(self, vector, batch_size):
+ return tf.reshape(
+ tf.transpose(
+ tf.reshape(vector, (batch_size, -1, self.num_heads, self.head_dim)),
+ (0, 2, 1, 3),
+ ),
+ (batch_size * self.num_heads, -1, self.head_dim),
+ )
+
+
+class TFLongformerAttention(keras.layers.Layer):
+ def __init__(self, config, layer_id=0, **kwargs):
+ super().__init__(**kwargs)
+
+ self.self_attention = TFLongformerSelfAttention(config, layer_id, name="self")
+ self.dense_output = TFLongformerSelfOutput(config, name="output")
+
+ def prune_heads(self, heads):
+ raise NotImplementedError
+
+ def call(self, inputs, training=False):
+ (
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ is_index_masked,
+ is_index_global_attn,
+ is_global_attn,
+ ) = inputs
+
+ self_outputs = self.self_attention(
+ [hidden_states, attention_mask, layer_head_mask, is_index_masked, is_index_global_attn, is_global_attn],
+ training=training,
+ )
+ attention_output = self.dense_output(self_outputs[0], hidden_states, training=training)
+ outputs = (attention_output,) + self_outputs[1:]
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self_attention", None) is not None:
+ with tf.name_scope(self.self_attention.name):
+ self.self_attention.build(None)
+ if getattr(self, "dense_output", None) is not None:
+ with tf.name_scope(self.dense_output.name):
+ self.dense_output.build(None)
+
+
+class TFLongformerLayer(keras.layers.Layer):
+ def __init__(self, config, layer_id=0, **kwargs):
+ super().__init__(**kwargs)
+
+ self.attention = TFLongformerAttention(config, layer_id, name="attention")
+ self.intermediate = TFLongformerIntermediate(config, name="intermediate")
+ self.longformer_output = TFLongformerOutput(config, name="output")
+
+ def call(self, inputs, training=False):
+ (
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ is_index_masked,
+ is_index_global_attn,
+ is_global_attn,
+ ) = inputs
+
+ attention_outputs = self.attention(
+ [hidden_states, attention_mask, layer_head_mask, is_index_masked, is_index_global_attn, is_global_attn],
+ training=training,
+ )
+ attention_output = attention_outputs[0]
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.longformer_output(intermediate_output, attention_output, training=training)
+ outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attention", None) is not None:
+ with tf.name_scope(self.attention.name):
+ self.attention.build(None)
+ if getattr(self, "intermediate", None) is not None:
+ with tf.name_scope(self.intermediate.name):
+ self.intermediate.build(None)
+ if getattr(self, "longformer_output", None) is not None:
+ with tf.name_scope(self.longformer_output.name):
+ self.longformer_output.build(None)
+
+
+class TFLongformerEncoder(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.output_hidden_states = config.output_hidden_states
+ self.output_attentions = config.output_attentions
+ self.layer = [TFLongformerLayer(config, i, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
+
+ def call(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ padding_len=0,
+ is_index_masked=None,
+ is_index_global_attn=None,
+ is_global_attn=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = all_global_attentions = () if output_attentions else None
+
+ for idx, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ hidden_states_to_add = hidden_states[:, :-padding_len] if padding_len > 0 else hidden_states
+ all_hidden_states = all_hidden_states + (hidden_states_to_add,)
+
+ layer_outputs = layer_module(
+ [
+ hidden_states,
+ attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ is_index_masked,
+ is_index_global_attn,
+ is_global_attn,
+ ],
+ training=training,
+ )
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ # bzs x seq_len x num_attn_heads x (num_global_attn + attention_window_len + 1) => bzs x num_attn_heads x seq_len x (num_global_attn + attention_window_len + 1)
+ all_attentions = all_attentions + (tf.transpose(layer_outputs[1], (0, 2, 1, 3)),)
+
+ # bzs x num_attn_heads x num_global_attn x seq_len => bzs x num_attn_heads x seq_len x num_global_attn
+ all_global_attentions = all_global_attentions + (tf.transpose(layer_outputs[2], (0, 1, 3, 2)),)
+
+ # Add last layer
+ if output_hidden_states:
+ hidden_states_to_add = hidden_states[:, :-padding_len] if padding_len > 0 else hidden_states
+ all_hidden_states = all_hidden_states + (hidden_states_to_add,)
+
+ # undo padding
+ # unpad `hidden_states` because the calling function is expecting a length == input_ids.size(1)
+ hidden_states = hidden_states[:, :-padding_len] if padding_len > 0 else hidden_states
+ if output_attentions:
+ all_attentions = (
+ tuple([state[:, :, :-padding_len, :] for state in all_attentions])
+ if padding_len > 0
+ else all_attentions
+ )
+
+ if not return_dict:
+ return tuple(
+ v for v in [hidden_states, all_hidden_states, all_attentions, all_global_attentions] if v is not None
+ )
+
+ return TFLongformerBaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_attentions,
+ global_attentions=all_global_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "layer", None) is not None:
+ for layer in self.layer:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+@keras_serializable
+class TFLongformerMainLayer(keras.layers.Layer):
+ config_class = LongformerConfig
+
+ def __init__(self, config, add_pooling_layer=True, **kwargs):
+ super().__init__(**kwargs)
+
+ if isinstance(config.attention_window, int):
+ assert config.attention_window % 2 == 0, "`config.attention_window` has to be an even value"
+ assert config.attention_window > 0, "`config.attention_window` has to be positive"
+ config.attention_window = [config.attention_window] * config.num_hidden_layers # one value per layer
+ else:
+ assert len(config.attention_window) == config.num_hidden_layers, (
+ "`len(config.attention_window)` should equal `config.num_hidden_layers`. "
+ f"Expected {config.num_hidden_layers}, given {len(config.attention_window)}"
+ )
+
+ self.config = config
+ self.num_hidden_layers = config.num_hidden_layers
+ self.initializer_range = config.initializer_range
+ self.output_attentions = config.output_attentions
+ self.output_hidden_states = config.output_hidden_states
+ self.return_dict = config.use_return_dict
+ self.pad_token_id = config.pad_token_id
+ self.attention_window = config.attention_window
+ self.embeddings = TFLongformerEmbeddings(config, name="embeddings")
+ self.encoder = TFLongformerEncoder(config, name="encoder")
+ self.pooler = TFLongformerPooler(config, name="pooler") if add_pooling_layer else None
+
+ def get_input_embeddings(self):
+ return self.embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.weight = value
+ self.embeddings.vocab_size = shape_list(value)[0]
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ raise NotImplementedError
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ head_mask=None,
+ global_attention_mask=None,
+ token_type_ids=None,
+ position_ids=None,
+ inputs_embeds=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ):
+ if input_ids is not None and not isinstance(input_ids, tf.Tensor):
+ input_ids = tf.convert_to_tensor(input_ids, dtype=tf.int64)
+ elif input_ids is not None:
+ input_ids = tf.cast(input_ids, tf.int64)
+
+ if attention_mask is not None and not isinstance(attention_mask, tf.Tensor):
+ attention_mask = tf.convert_to_tensor(attention_mask, dtype=tf.int64)
+ elif attention_mask is not None:
+ attention_mask = tf.cast(attention_mask, tf.int64)
+
+ if global_attention_mask is not None and not isinstance(global_attention_mask, tf.Tensor):
+ global_attention_mask = tf.convert_to_tensor(global_attention_mask, dtype=tf.int64)
+ elif global_attention_mask is not None:
+ global_attention_mask = tf.cast(global_attention_mask, tf.int64)
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if attention_mask is None:
+ attention_mask = tf.cast(tf.fill(input_shape, 1), tf.int64)
+
+ if token_type_ids is None:
+ token_type_ids = tf.cast(tf.fill(input_shape, 0), tf.int64)
+
+ # merge `global_attention_mask` and `attention_mask`
+ if global_attention_mask is not None:
+ attention_mask = self._merge_to_attention_mask(attention_mask, global_attention_mask)
+
+ (
+ padding_len,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ inputs_embeds,
+ ) = self._pad_to_window_size(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ pad_token_id=self.pad_token_id,
+ )
+
+ # is index masked or global attention
+ is_index_masked = tf.math.less(attention_mask, 1)
+ is_index_global_attn = tf.math.greater(attention_mask, 1)
+ is_global_attn = tf.math.reduce_any(is_index_global_attn)
+
+ # We create a 3D attention mask from a 2D tensor mask.
+ # Sizes are [batch_size, to_seq_length, 1, 1]
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+ # this attention mask is more simple than the triangular masking of causal attention
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
+ attention_mask_shape = shape_list(attention_mask)
+ extended_attention_mask = tf.reshape(attention_mask, (attention_mask_shape[0], attention_mask_shape[1], 1, 1))
+
+ # Since attention_mask is 1.0 for positions we want to attend locally and 0.0 for
+ # masked and global attn positions, this operation will create a tensor which is 0.0 for
+ # positions we want to attend and -10000.0 for masked positions.
+ # Since we are adding it to the raw scores before the softmax, this is
+ # effectively the same as removing these entirely.
+ extended_attention_mask = tf.cast(tf.math.abs(1 - extended_attention_mask), tf.dtypes.float32) * -10000.0
+ embedding_output = self.embeddings(
+ input_ids,
+ position_ids,
+ token_type_ids,
+ inputs_embeds,
+ training=training,
+ )
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ padding_len=padding_len,
+ is_index_masked=is_index_masked,
+ is_index_global_attn=is_index_global_attn,
+ is_global_attn=is_global_attn,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (
+ sequence_output,
+ pooled_output,
+ ) + encoder_outputs[1:]
+
+ return TFLongformerBaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ global_attentions=encoder_outputs.global_attentions,
+ )
+
+ def _pad_to_window_size(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ inputs_embeds,
+ pad_token_id,
+ ):
+ """A helper function to pad tokens and mask to work with implementation of Longformer selfattention."""
+ # padding
+ attention_window = (
+ self.attention_window if isinstance(self.attention_window, int) else max(self.attention_window)
+ )
+
+ assert attention_window % 2 == 0, f"`attention_window` should be an even value. Given {attention_window}"
+
+ input_shape = shape_list(input_ids) if input_ids is not None else shape_list(inputs_embeds)
+ batch_size, seq_len = input_shape[:2]
+ padding_len = (attention_window - seq_len % attention_window) % attention_window
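+ # For instance (illustrative numbers): with attention_window = 512, a 513-token input is padded
+ # by (512 - 513 % 512) % 512 = 511 tokens, while an input whose length is already a multiple of
+ # 512 gets padding_len = 0.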
+
+ paddings = tf.convert_to_tensor([[0, 0], [0, padding_len]])
+
+ if input_ids is not None:
+ input_ids = tf.pad(input_ids, paddings, constant_values=pad_token_id)
+
+ if position_ids is not None:
+ # pad with position_id = pad_token_id as in modeling_roberta.RobertaEmbeddings
+ position_ids = tf.pad(position_ids, paddings, constant_values=pad_token_id)
+
+ if inputs_embeds is not None:
+ if padding_len > 0:
+ input_ids_padding = tf.cast(tf.fill((batch_size, padding_len), self.pad_token_id), tf.int64)
+ inputs_embeds_padding = self.embeddings(input_ids_padding)
+ inputs_embeds = tf.concat([inputs_embeds, inputs_embeds_padding], axis=-2)
+
+ attention_mask = tf.pad(attention_mask, paddings, constant_values=False) # no attention on the padding tokens
+ token_type_ids = tf.pad(token_type_ids, paddings, constant_values=0) # pad with token_type_id = 0
+
+ return (
+ padding_len,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ inputs_embeds,
+ )
+
+ @staticmethod
+ def _merge_to_attention_mask(attention_mask: tf.Tensor, global_attention_mask: tf.Tensor):
+ # longformer self attention expects attention mask to have 0 (no attn), 1 (local attn), 2 (global attn)
+ # (global_attention_mask + 1) => 1 for local attention, 2 for global attention
+ # => final attention_mask => 0 for no attention, 1 for local attention 2 for global attention
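+ # A small worked example (illustrative only): attention_mask = [1, 1, 1, 0] and
+ # global_attention_mask = [1, 0, 0, 0] merge to [1, 1, 1, 0] * [2, 1, 1, 1] = [2, 1, 1, 0],
+ # i.e. global attention on the first token, local attention on the next two tokens, and no
+ # attention on the final padding token.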
+ if attention_mask is not None:
+ attention_mask = attention_mask * (global_attention_mask + 1)
+ else:
+ # simply use `global_attention_mask` as `attention_mask`
+ # if no `attention_mask` is given
+ attention_mask = global_attention_mask + 1
+
+ return attention_mask
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "pooler", None) is not None:
+ with tf.name_scope(self.pooler.name):
+ self.pooler.build(None)
+
+
+class TFLongformerPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = LongformerConfig
+ base_model_prefix = "longformer"
+
+ @property
+ def input_signature(self):
+ sig = super().input_signature
+ sig["global_attention_mask"] = tf.TensorSpec((None, None), tf.int32, name="global_attention_mask")
+ return sig
+
+
+LONGFORMER_START_DOCSTRING = r"""
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
+ behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+
+
+ Parameters:
+ config ([`LongformerConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+LONGFORMER_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`np.ndarray` or `tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ global_attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Mask to decide the attention given on each token, local attention or global attention. Tokens with global
+ attention attend to all other tokens, and all other tokens attend to them. This is important for
+ task-specific finetuning because it makes the model more flexible at representing the task. For example,
+ for classification, the <s> token should be given global attention. For QA, all question tokens should also
+ have global attention. Please refer to the [Longformer paper](https://arxiv.org/abs/2004.05150) for more
+ details. Mask values selected in `[0, 1]`:
+
+ - 0 for local attention (a sliding window attention),
+ - 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
+
+ token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+@add_start_docstrings(
+ "The bare Longformer Model outputting raw hidden-states without any specific head on top.",
+ LONGFORMER_START_DOCSTRING,
+)
+class TFLongformerModel(TFLongformerPreTrainedModel):
+ """
+
+ This class copies code from [`TFRobertaModel`] and overwrites standard self-attention with longformer
+ self-attention to provide the ability to process long sequences following the self-attention approach described in
+ [Longformer: the Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, and
+ Arman Cohan. Longformer self-attention combines a local (sliding window) and global attention to extend to long
+ documents without the O(n^2) increase in memory and compute.
+
+ The self-attention module `TFLongformerSelfAttention` implemented here supports the combination of local and global
+ attention but it lacks support for autoregressive attention and dilated attention. Autoregressive and dilated
+ attention are more relevant for autoregressive language modeling than finetuning on downstream tasks. A future
+ release will add support for autoregressive attention, but support for dilated attention requires a custom CUDA
+ kernel to be memory- and compute-efficient.
+
+ """
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.longformer = TFLongformerMainLayer(config, name="longformer")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ global_attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFLongformerBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
+ outputs = self.longformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ global_attention_mask=global_attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "longformer", None) is not None:
+ with tf.name_scope(self.longformer.name):
+ self.longformer.build(None)
+
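+# Illustrative usage sketch (an assumption, not part of the upstream file): the bare
+# model combines local (sliding window) attention with optional global attention,
+# which callers control through `global_attention_mask` (1 = attend globally).
+#
+#   import tensorflow as tf
+#   from transformers import LongformerTokenizerFast, TFLongformerModel
+#
+#   tokenizer = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096")
+#   model = TFLongformerModel.from_pretrained("allenai/longformer-base-4096")
+#   inputs = tokenizer("A very long document ...", return_tensors="tf")
+#   global_attention_mask = tf.zeros_like(inputs["input_ids"])  # purely local attention
+#   outputs = model(**inputs, global_attention_mask=global_attention_mask)
+#   outputs.last_hidden_state.shape  # (batch_size, sequence_length, hidden_size)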
+
+@add_start_docstrings(
+ """Longformer Model with a `language modeling` head on top.""",
+ LONGFORMER_START_DOCSTRING,
+)
+class TFLongformerForMaskedLM(TFLongformerPreTrainedModel, TFMaskedLanguageModelingLoss):
+    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.longformer = TFLongformerMainLayer(config, add_pooling_layer=False, name="longformer")
+ self.lm_head = TFLongformerLMHead(config, self.longformer.embeddings, name="lm_head")
+
+ def get_lm_head(self):
+ return self.lm_head
+
+ def get_prefix_bias_name(self):
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
+ return self.name + "/" + self.lm_head.name
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="allenai/longformer-base-4096",
+ output_type=TFLongformerMaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+        mask="<mask>",
+ expected_output="' Paris'",
+ expected_loss=0.44,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ global_attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFLongformerMaskedLMOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
+            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ """
+
+ outputs = self.longformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ global_attention_mask=global_attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ prediction_scores = self.lm_head(sequence_output, training=training)
+ loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores)
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+
+ return ((loss,) + output) if loss is not None else output
+
+ return TFLongformerMaskedLMOutput(
+ loss=loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ global_attentions=outputs.global_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "longformer", None) is not None:
+ with tf.name_scope(self.longformer.name):
+ self.longformer.build(None)
+ if getattr(self, "lm_head", None) is not None:
+ with tf.name_scope(self.lm_head.name):
+ self.lm_head.build(None)
+
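+# Illustrative usage sketch (an assumption, not part of the upstream file), mirroring
+# the code-sample docstring above:
+#
+#   from transformers import pipeline
+#
+#   fill_mask = pipeline("fill-mask", model="allenai/longformer-base-4096", framework="tf")
+#   fill_mask("The capital of France is <mask>.")  # top prediction: " Paris"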
+
+@add_start_docstrings(
+ """
+ Longformer Model with a span classification head on top for extractive question-answering tasks like SQuAD /
+ TriviaQA (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ LONGFORMER_START_DOCSTRING,
+)
+class TFLongformerForQuestionAnswering(TFLongformerPreTrainedModel, TFQuestionAnsweringLoss):
+    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.num_labels = config.num_labels
+ self.longformer = TFLongformerMainLayer(config, add_pooling_layer=False, name="longformer")
+ self.qa_outputs = keras.layers.Dense(
+ config.num_labels,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="qa_outputs",
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="allenai/longformer-large-4096-finetuned-triviaqa",
+ output_type=TFLongformerQuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output="' puppet'",
+ expected_loss=0.96,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ global_attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ start_positions: np.ndarray | tf.Tensor | None = None,
+ end_positions: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFLongformerQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (*sequence_length*). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (*sequence_length*). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+
+ if input_ids is not None and not isinstance(input_ids, tf.Tensor):
+ input_ids = tf.convert_to_tensor(input_ids, dtype=tf.int64)
+ elif input_ids is not None:
+ input_ids = tf.cast(input_ids, tf.int64)
+
+ if attention_mask is not None and not isinstance(attention_mask, tf.Tensor):
+ attention_mask = tf.convert_to_tensor(attention_mask, dtype=tf.int64)
+ elif attention_mask is not None:
+ attention_mask = tf.cast(attention_mask, tf.int64)
+
+ if global_attention_mask is not None and not isinstance(global_attention_mask, tf.Tensor):
+ global_attention_mask = tf.convert_to_tensor(global_attention_mask, dtype=tf.int64)
+ elif global_attention_mask is not None:
+ global_attention_mask = tf.cast(global_attention_mask, tf.int64)
+
+ # set global attention on question tokens
+ if global_attention_mask is None and input_ids is not None:
+ if shape_list(tf.where(input_ids == self.config.sep_token_id))[0] != 3 * shape_list(input_ids)[0]:
+ logger.warning(
+ f"There should be exactly three separator tokens: {self.config.sep_token_id} in every sample for"
+                    " question answering. You might also consider setting `global_attention_mask` manually in the"
+ " forward function to avoid this. This is most likely an error. The global attention is disabled"
+ " for this forward pass."
+ )
+ global_attention_mask = tf.cast(tf.fill(shape_list(input_ids), value=0), tf.int64)
+ else:
+ logger.warning_once("Initializing global attention on question tokens...")
+ # put global attention on all tokens until `config.sep_token_id` is reached
+ sep_token_indices = tf.where(input_ids == self.config.sep_token_id)
+ sep_token_indices = tf.cast(sep_token_indices, dtype=tf.int64)
+ global_attention_mask = _compute_global_attention_mask(shape_list(input_ids), sep_token_indices)
+
+ outputs = self.longformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ global_attention_mask=global_attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
+ start_logits = tf.squeeze(start_logits, axis=-1)
+ end_logits = tf.squeeze(end_logits, axis=-1)
+ loss = None
+
+ if start_positions is not None and end_positions is not None:
+ labels = {"start_position": start_positions}
+ labels["end_position"] = end_positions
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+
+ return ((loss,) + output) if loss is not None else output
+
+ return TFLongformerQuestionAnsweringModelOutput(
+ loss=loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ global_attentions=outputs.global_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "longformer", None) is not None:
+ with tf.name_scope(self.longformer.name):
+ self.longformer.build(None)
+ if getattr(self, "qa_outputs", None) is not None:
+ with tf.name_scope(self.qa_outputs.name):
+ self.qa_outputs.build([None, None, self.config.hidden_size])
+
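+# Illustrative usage sketch (an assumption, not part of the upstream file): when no
+# `global_attention_mask` is supplied, the question-answering head builds one from the
+# separator tokens so that every question token attends globally:
+#
+#   from transformers import LongformerTokenizerFast, TFLongformerForQuestionAnswering
+#
+#   ckpt = "allenai/longformer-large-4096-finetuned-triviaqa"
+#   tokenizer = LongformerTokenizerFast.from_pretrained(ckpt)
+#   model = TFLongformerForQuestionAnswering.from_pretrained(ckpt)
+#   inputs = tokenizer("Who has the puppet?", "Jim Henson has the puppet.", return_tensors="tf")
+#   outputs = model(**inputs)  # global attention is set automatically on the question span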
+
+class TFLongformerClassificationHead(keras.layers.Layer):
+ """Head for sentence-level classification tasks."""
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.dense = keras.layers.Dense(
+ config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ activation="tanh",
+ name="dense",
+ )
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+ self.out_proj = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
+ )
+ self.config = config
+
+ def call(self, hidden_states, training=False):
+ hidden_states = hidden_states[:, 0, :] # take token (equiv. to [CLS])
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ output = self.out_proj(hidden_states)
+ return output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "out_proj", None) is not None:
+ with tf.name_scope(self.out_proj.name):
+ self.out_proj.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings(
+ """
+ Longformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+ pooled output) e.g. for GLUE tasks.
+ """,
+ LONGFORMER_START_DOCSTRING,
+)
+class TFLongformerForSequenceClassification(TFLongformerPreTrainedModel, TFSequenceClassificationLoss):
+    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.num_labels = config.num_labels
+
+ self.longformer = TFLongformerMainLayer(config, add_pooling_layer=False, name="longformer")
+ self.classifier = TFLongformerClassificationHead(config, name="classifier")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFLongformerSequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ global_attention_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFLongformerSequenceClassifierOutput, Tuple[tf.Tensor]]:
+ if input_ids is not None and not isinstance(input_ids, tf.Tensor):
+ input_ids = tf.convert_to_tensor(input_ids, dtype=tf.int64)
+ elif input_ids is not None:
+ input_ids = tf.cast(input_ids, tf.int64)
+
+ if attention_mask is not None and not isinstance(attention_mask, tf.Tensor):
+ attention_mask = tf.convert_to_tensor(attention_mask, dtype=tf.int64)
+ elif attention_mask is not None:
+ attention_mask = tf.cast(attention_mask, tf.int64)
+
+ if global_attention_mask is not None and not isinstance(global_attention_mask, tf.Tensor):
+ global_attention_mask = tf.convert_to_tensor(global_attention_mask, dtype=tf.int64)
+ elif global_attention_mask is not None:
+ global_attention_mask = tf.cast(global_attention_mask, tf.int64)
+
+ if global_attention_mask is None and input_ids is not None:
+ logger.warning_once("Initializing global attention on CLS token...")
+ # global attention on cls token
+ global_attention_mask = tf.zeros_like(input_ids)
+ updates = tf.ones(shape_list(input_ids)[0], dtype=tf.int64)
+ indices = tf.pad(
+ tensor=tf.expand_dims(tf.range(shape_list(input_ids)[0], dtype=tf.int64), axis=1),
+ paddings=[[0, 0], [0, 1]],
+ constant_values=0,
+ )
+ global_attention_mask = tf.tensor_scatter_nd_update(
+ global_attention_mask,
+ indices,
+ updates,
+ )
+
+ outputs = self.longformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ global_attention_mask=global_attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ logits = self.classifier(sequence_output)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFLongformerSequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ global_attentions=outputs.global_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "longformer", None) is not None:
+ with tf.name_scope(self.longformer.name):
+ self.longformer.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build(None)
+
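+# Illustrative sketch (an assumption, not part of the upstream file) of the automatic
+# global-attention mask built above when none is passed: only the first (<s>/CLS)
+# position of each sample attends globally.
+#
+#   import tensorflow as tf
+#
+#   input_ids = tf.constant([[0, 31414, 232, 2]])
+#   mask = tf.tensor_scatter_nd_update(tf.zeros_like(input_ids), [[0, 0]], [1])
+#   # -> [[1, 0, 0, 0]]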
+
+@add_start_docstrings(
+ """
+ Longformer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
+ a softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ LONGFORMER_START_DOCSTRING,
+)
+class TFLongformerForMultipleChoice(TFLongformerPreTrainedModel, TFMultipleChoiceLoss):
+    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_missing = [r"dropout"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.longformer = TFLongformerMainLayer(config, name="longformer")
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+ self.classifier = keras.layers.Dense(
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @property
+ def input_signature(self):
+ return {
+ "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
+ "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
+ "global_attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="global_attention_mask"),
+ }
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(
+ LONGFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
+ )
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFLongformerMultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ global_attention_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFLongformerMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
+ """
+
+ if input_ids is not None:
+ num_choices = shape_list(input_ids)[1]
+ seq_length = shape_list(input_ids)[2]
+ else:
+ num_choices = shape_list(inputs_embeds)[1]
+ seq_length = shape_list(inputs_embeds)[2]
+
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
+ flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
+ flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
+ flat_global_attention_mask = (
+ tf.reshape(global_attention_mask, (-1, shape_list(global_attention_mask)[-1]))
+ if global_attention_mask is not None
+ else None
+ )
+ flat_inputs_embeds = (
+ tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
+ if inputs_embeds is not None
+ else None
+ )
+
+ outputs = self.longformer(
+ flat_input_ids,
+ position_ids=flat_position_ids,
+ token_type_ids=flat_token_type_ids,
+ attention_mask=flat_attention_mask,
+ head_mask=head_mask,
+ global_attention_mask=flat_global_attention_mask,
+ inputs_embeds=flat_inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
+
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFLongformerMultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ global_attentions=outputs.global_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "longformer", None) is not None:
+ with tf.name_scope(self.longformer.name):
+ self.longformer.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
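+# Illustrative shape sketch (an assumption, not part of the upstream file): for 2
+# questions with 4 answer candidates of 128 tokens each, `input_ids` has shape
+# (2, 4, 128); it is flattened to (8, 128) before the encoder call and the per-choice
+# logits are reshaped back to (2, 4) for the cross-entropy loss over choices.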
+
+@add_start_docstrings(
+ """
+ Longformer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
+ for Named-Entity-Recognition (NER) tasks.
+ """,
+ LONGFORMER_START_DOCSTRING,
+)
+class TFLongformerForTokenClassification(TFLongformerPreTrainedModel, TFTokenClassificationLoss):
+    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
+ _keys_to_ignore_on_load_missing = [r"dropout"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.num_labels = config.num_labels
+ self.longformer = TFLongformerMainLayer(config=config, add_pooling_layer=False, name="longformer")
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+ self.classifier = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFLongformerTokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ global_attention_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: Optional[Union[np.array, tf.Tensor]] = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFLongformerTokenClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+
+ outputs = self.longformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ global_attention_mask=global_attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFLongformerTokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ global_attentions=outputs.global_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "longformer", None) is not None:
+ with tf.name_scope(self.longformer.name):
+ self.longformer.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
+
+__all__ = [
+ "TFLongformerForMaskedLM",
+ "TFLongformerForMultipleChoice",
+ "TFLongformerForQuestionAnswering",
+ "TFLongformerForSequenceClassification",
+ "TFLongformerForTokenClassification",
+ "TFLongformerModel",
+ "TFLongformerPreTrainedModel",
+ "TFLongformerSelfAttention",
+]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/longformer/tokenization_longformer.py b/janus/lib/python3.10/site-packages/transformers/models/longformer/tokenization_longformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..afecf750135b0d4a45f79ea91cfb223beefa814c
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/longformer/tokenization_longformer.py
@@ -0,0 +1,402 @@
+# coding=utf-8
+# Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+from functools import lru_cache
+from typing import List, Optional, Tuple
+
+import regex as re
+
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
+
+
+@lru_cache()
+# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
+def bytes_to_unicode():
+ """
+    Returns a mapping between utf-8 bytes and unicode strings. We specifically avoid mapping to whitespace/control
+ characters the bpe code barfs on.
+
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
+ tables between utf-8 bytes and unicode strings.
+ """
+ bs = (
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
+ )
+ cs = bs[:]
+ n = 0
+ for b in range(2**8):
+ if b not in bs:
+ bs.append(b)
+ cs.append(2**8 + n)
+ n += 1
+ cs = [chr(n) for n in cs]
+ return dict(zip(bs, cs))
+
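+# Illustrative sketch (not part of the upstream file): the result is a bijection over
+# all 256 byte values, e.g.
+#   byte_encoder = bytes_to_unicode()
+#   byte_encoder[ord("a")]  # -> "a"  (printable bytes map to themselves)
+#   byte_encoder[0]         # -> "Ā"  (control bytes are shifted into a printable range)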
+
+# Copied from transformers.models.roberta.tokenization_roberta.get_pairs
+def get_pairs(word):
+ """
+ Return set of symbol pairs in a word.
+
+ Word is represented as tuple of symbols (symbols being variable-length strings).
+ """
+ pairs = set()
+ prev_char = word[0]
+ for char in word[1:]:
+ pairs.add((prev_char, char))
+ prev_char = char
+ return pairs
+
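+# Illustrative sketch (not part of the upstream file):
+#   get_pairs(("h", "e", "l", "l", "o"))
+#   # -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}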
+
+# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer with FacebookAI/roberta-base->allenai/longformer-base-4096, RoBERTa->Longformer all-casing, RobertaTokenizer->LongformerTokenizer
+class LongformerTokenizer(PreTrainedTokenizer):
+ """
+ Constructs a Longformer tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.
+
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
+ be encoded differently whether it is at the beginning of the sentence (without space) or not:
+
+ ```python
+ >>> from transformers import LongformerTokenizer
+
+ >>> tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
+ >>> tokenizer("Hello world")["input_ids"]
+ [0, 31414, 232, 2]
+
+ >>> tokenizer(" Hello world")["input_ids"]
+ [0, 20920, 232, 2]
+ ```
+
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
+
+
+
+ When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
+
+
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Path to the merges file.
+ errors (`str`, *optional*, defaults to `"replace"`):
+ Paradigm to follow when decoding bytes to UTF-8. See
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
+        bos_token (`str`, *optional*, defaults to `"<s>"`):
+            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+
+
+        eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+
+
+        sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+        cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+        unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+        pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+        mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
+            Whether or not to add an initial space to the input. This allows treating the leading word just like any
+            other word. (The Longformer tokenizer detects the beginning of words by the preceding space.)
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ merges_file,
+ errors="replace",
+        bos_token="<s>",
+        eos_token="</s>",
+        sep_token="</s>",
+        cls_token="<s>",
+        unk_token="<unk>",
+        pad_token="<pad>",
+        mask_token="<mask>",
+ add_prefix_space=False,
+ **kwargs,
+ ):
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
+
+ # Mask token behave like a normal word, i.e. include the space before it
+ mask_token = (
+ AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
+ if isinstance(mask_token, str)
+ else mask_token
+ )
+
+ # these special tokens are not part of the vocab.json, let's add them in the correct order
+
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
+ self.encoder = json.load(vocab_handle)
+ self.decoder = {v: k for k, v in self.encoder.items()}
+ self.errors = errors # how to handle errors in decoding
+ self.byte_encoder = bytes_to_unicode()
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+ with open(merges_file, encoding="utf-8") as merges_handle:
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
+ self.cache = {}
+ self.add_prefix_space = add_prefix_space
+
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
+
+ super().__init__(
+ errors=errors,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ add_prefix_space=add_prefix_space,
+ **kwargs,
+ )
+
+ @property
+ def vocab_size(self):
+ return len(self.encoder)
+
+ def get_vocab(self):
+ vocab = dict(self.encoder).copy()
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def bpe(self, token):
+ if token in self.cache:
+ return self.cache[token]
+ word = tuple(token)
+ pairs = get_pairs(word)
+
+ if not pairs:
+ return token
+
+ while True:
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+ if bigram not in self.bpe_ranks:
+ break
+ first, second = bigram
+ new_word = []
+ i = 0
+ while i < len(word):
+ try:
+ j = word.index(first, i)
+ except ValueError:
+ new_word.extend(word[i:])
+ break
+ else:
+ new_word.extend(word[i:j])
+ i = j
+
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+ new_word.append(first + second)
+ i += 2
+ else:
+ new_word.append(word[i])
+ i += 1
+ new_word = tuple(new_word)
+ word = new_word
+ if len(word) == 1:
+ break
+ else:
+ pairs = get_pairs(word)
+ word = " ".join(word)
+ self.cache[token] = word
+ return word
+
+ def _tokenize(self, text):
+ """Tokenize a string."""
+ bpe_tokens = []
+ for token in re.findall(self.pat, text):
+ token = "".join(
+ self.byte_encoder[b] for b in token.encode("utf-8")
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
+ return bpe_tokens
+
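+    # Illustrative sketch (an assumption, not part of the upstream file), using the
+    # pretrained "allenai/longformer-base-4096" vocabulary:
+    #   tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
+    #   tokenizer.tokenize("Hello world")  # -> ["Hello", "Ġworld"]  ("Ġ" encodes the leading space)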
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.decoder.get(index)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ text = "".join(tokens)
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
+ return text
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ merge_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
+ )
+
+ with open(vocab_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
+
+ index = 0
+ with open(merge_file, "w", encoding="utf-8") as writer:
+ writer.write("#version: 0.2\n")
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
+ " Please check that the tokenizer is not corrupted!"
+ )
+ index = token_index
+ writer.write(" ".join(bpe_tokens) + "\n")
+ index += 1
+
+ return vocab_file, merge_file
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A Longformer sequence has the following format:
+
+        - single sequence: `<s> X </s>`
+        - pair of sequences: `<s> A </s></s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
+
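+    # Illustrative sketch (an assumption, not part of the upstream file), using the
+    # pretrained "allenai/longformer-base-4096" vocabulary (cls_token_id=0, sep_token_id=2):
+    #   tokenizer.build_inputs_with_special_tokens([31414, 232])    # -> [0, 31414, 232, 2]
+    #   tokenizer.build_inputs_with_special_tokens([31414], [232])  # -> [0, 31414, 2, 2, 232, 2]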
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is None:
+ return [1] + ([0] * len(token_ids_0)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
+
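+    # Illustrative sketch (an assumption, not part of the upstream file):
+    #   tokenizer.get_special_tokens_mask([31414, 232])  # -> [1, 0, 0, 1]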
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. Longformer does not
+ make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
+ if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
+ text = " " + text
+ return (text, kwargs)
+
+
+__all__ = ["LongformerTokenizer"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/longformer/tokenization_longformer_fast.py b/janus/lib/python3.10/site-packages/transformers/models/longformer/tokenization_longformer_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8111b3d8a25f35bb1fae0f1aa28e28b35fa3cfd
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/longformer/tokenization_longformer_fast.py
@@ -0,0 +1,265 @@
+# coding=utf-8
+# Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Fast Tokenization classes for Longformer."""
+
+import json
+from typing import List, Optional, Tuple
+
+from tokenizers import processors
+
+from ...tokenization_utils_base import AddedToken, BatchEncoding
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import logging
+from .tokenization_longformer import LongformerTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
+
+
+# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast with FacebookAI/roberta-base->allenai/longformer-base-4096, RoBERTa->Longformer all-casing, Roberta->Longformer
+class LongformerTokenizerFast(PreTrainedTokenizerFast):
+ """
+ Construct a "fast" Longformer tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2
+ tokenizer, using byte-level Byte-Pair-Encoding.
+
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
+ be encoded differently whether it is at the beginning of the sentence (without space) or not:
+
+ ```python
+ >>> from transformers import LongformerTokenizerFast
+
+ >>> tokenizer = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096")
+ >>> tokenizer("Hello world")["input_ids"]
+ [0, 31414, 232, 2]
+
+ >>> tokenizer(" Hello world")["input_ids"]
+ [0, 20920, 232, 2]
+ ```
+
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
+
+
+
+ When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
+
+
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Path to the merges file.
+ errors (`str`, *optional*, defaults to `"replace"`):
+ Paradigm to follow when decoding bytes to UTF-8. See
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
+        bos_token (`str`, *optional*, defaults to `"<s>"`):
+            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+
+
+        eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+
+
+        sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+        cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+        unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+        pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+        mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
+            Whether or not to add an initial space to the input. This allows treating the leading word just like any
+            other word. (The Longformer tokenizer detects the beginning of words by the preceding space.)
+ trim_offsets (`bool`, *optional*, defaults to `True`):
+ Whether the post processing step should trim offsets to avoid including whitespaces.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+ slow_tokenizer_class = LongformerTokenizer
+
+ def __init__(
+ self,
+ vocab_file=None,
+ merges_file=None,
+ tokenizer_file=None,
+ errors="replace",
+        bos_token="<s>",
+        eos_token="</s>",
+        sep_token="</s>",
+        cls_token="<s>",
+        unk_token="<unk>",
+        pad_token="<pad>",
+        mask_token="<mask>",
+ add_prefix_space=False,
+ trim_offsets=True,
+ **kwargs,
+ ):
+ mask_token = (
+ AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
+ if isinstance(mask_token, str)
+ else mask_token
+ )
+ super().__init__(
+ vocab_file,
+ merges_file,
+ tokenizer_file=tokenizer_file,
+ errors=errors,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ add_prefix_space=add_prefix_space,
+ trim_offsets=trim_offsets,
+ **kwargs,
+ )
+
+ tokenizer_component = "post_processor"
+ tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
+ if tokenizer_component_instance:
+ state = json.loads(tokenizer_component_instance.__getstate__())
+
+            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
+ if "sep" in state:
+ state["sep"] = tuple(state["sep"])
+ if "cls" in state:
+ state["cls"] = tuple(state["cls"])
+
+ changes_to_apply = False
+
+ if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
+ state["add_prefix_space"] = add_prefix_space
+ changes_to_apply = True
+
+ if state.get("trim_offsets", trim_offsets) != trim_offsets:
+ state["trim_offsets"] = trim_offsets
+ changes_to_apply = True
+
+ if changes_to_apply:
+ component_class = getattr(processors, state.pop("type"))
+ new_value = component_class(**state)
+ setattr(self.backend_tokenizer, tokenizer_component, new_value)
+
+ @property
+ def mask_token(self) -> str:
+ """
+ `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
+ having been set.
+
+ Longformer tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily
+        comprise the space before the *<mask>*.
+ """
+ if self._mask_token is None:
+ if self.verbose:
+ logger.error("Using mask_token, but it is not set yet.")
+ return None
+ return str(self._mask_token)
+
+ @mask_token.setter
+ def mask_token(self, value):
+ """
+ Overriding the default behavior of the mask token to have it eat the space before it.
+
+ This is needed to preserve backward compatibility with all the previously used models based on Longformer.
+ """
+ # Mask token behave like a normal word, i.e. include the space before it
+ # So we set lstrip to True
+ value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
+ self._mask_token = value
+
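+    # Illustrative sketch (an assumption, not part of the upstream file): because the
+    # mask token is registered with lstrip=True, the preceding space is absorbed, so
+    #   tokenizer("My favorite city is <mask>.")
+    # encodes " <mask>" as a single mask token rather than a space token followed by the mask.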
+ def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
+ is_split_into_words = kwargs.get("is_split_into_words", False)
+ assert self.add_prefix_space or not is_split_into_words, (
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
+ "to use it with pretokenized inputs."
+ )
+
+ return super()._batch_encode_plus(*args, **kwargs)
+
+ def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
+ is_split_into_words = kwargs.get("is_split_into_words", False)
+
+ assert self.add_prefix_space or not is_split_into_words, (
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
+ "to use it with pretokenized inputs."
+ )
+
+ return super()._encode_plus(*args, **kwargs)
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+ return tuple(files)
+
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+ output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
+ if token_ids_1 is None:
+ return output
+
+ return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. Longformer does not
+ make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+
+__all__ = ["LongformerTokenizerFast"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/mixtral/__init__.py b/janus/lib/python3.10/site-packages/transformers/models/mixtral/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ee4834dd24984fd864e730933b31ee6503b4a14
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/mixtral/__init__.py
@@ -0,0 +1,66 @@
+# Copyright 2023 Mixtral AI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_mixtral": ["MixtralConfig"],
+}
+
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_mixtral"] = [
+ "MixtralForCausalLM",
+ "MixtralForQuestionAnswering",
+ "MixtralModel",
+ "MixtralPreTrainedModel",
+ "MixtralForSequenceClassification",
+ "MixtralForTokenClassification",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_mixtral import MixtralConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_mixtral import (
+ MixtralForCausalLM,
+ MixtralForQuestionAnswering,
+ MixtralForSequenceClassification,
+ MixtralForTokenClassification,
+ MixtralModel,
+ MixtralPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
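+
+# Illustrative sketch (an assumption, not part of the upstream file): with the lazy
+# module in place, importing the configuration does not require torch:
+#   from transformers.models.mixtral import MixtralConfig
+#   config = MixtralConfig(num_hidden_layers=2, num_local_experts=4)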
diff --git a/janus/lib/python3.10/site-packages/transformers/models/mixtral/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/mixtral/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..580b3defa1a15a3c9ab41dab2342928fead9cce0
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/mixtral/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/mixtral/__pycache__/configuration_mixtral.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/mixtral/__pycache__/configuration_mixtral.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b7d260150cfd8ea8c33c1970915253148bf88745
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/mixtral/__pycache__/configuration_mixtral.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/mixtral/__pycache__/modeling_mixtral.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/mixtral/__pycache__/modeling_mixtral.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..636a202215dc0d76f65729d690fd5671a3b14009
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/mixtral/__pycache__/modeling_mixtral.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/mixtral/configuration_mixtral.py b/janus/lib/python3.10/site-packages/transformers/models/mixtral/configuration_mixtral.py
new file mode 100644
index 0000000000000000000000000000000000000000..686c214ef25ce541f80005b396a2df0f7fd673a4
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/mixtral/configuration_mixtral.py
@@ -0,0 +1,173 @@
+# coding=utf-8
+# Copyright 2023 Mixtral AI and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Mixtral model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class MixtralConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`MixtralModel`]. It is used to instantiate an
+ Mixtral model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the Mixtral-7B-v0.1 or Mixtral-7B-Instruct-v0.1.
+
+ [mixtralai/Mixtral-8x7B](https://huggingface.co/mixtralai/Mixtral-8x7B)
+ [mixtralai/Mixtral-7B-Instruct-v0.1](https://huggingface.co/mixtralai/Mixtral-7B-Instruct-v0.1)
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 32000):
+ Vocabulary size of the Mixtral model. Defines the number of different tokens that can be represented by the
+            `input_ids` passed when calling [`MixtralModel`].
+ hidden_size (`int`, *optional*, defaults to 4096):
+ Dimension of the hidden representations.
+ intermediate_size (`int`, *optional*, defaults to 14336):
+ Dimension of the MLP representations.
+ num_hidden_layers (`int`, *optional*, defaults to 32):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 32):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ num_key_value_heads (`int`, *optional*, defaults to 8):
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+            `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+ by meanpooling all the original heads within that group. For more details checkout [this
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`.
+ head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`):
+ The attention head dimension.
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+ The non-linear activation function (function or string) in the decoder.
+ max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
+ The maximum sequence length that this model might ever be used with. Mixtral's sliding window attention
+ allows sequences of up to 4096*32 tokens.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ rms_norm_eps (`float`, *optional*, defaults to 1e-05):
+ The epsilon used by the rms normalization layers.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ pad_token_id (`int`, *optional*):
+ The id of the padding token.
+ bos_token_id (`int`, *optional*, defaults to 1):
+ The id of the "beginning-of-sequence" token.
+ eos_token_id (`int`, *optional*, defaults to 2):
+ The id of the "end-of-sequence" token.
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+ Whether the model's input and output word embeddings should be tied.
+ rope_theta (`float`, *optional*, defaults to 1000000.0):
+ The base period of the RoPE embeddings.
+ sliding_window (`int`, *optional*):
+ Sliding window attention window size. If not specified, will default to `4096`.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ num_experts_per_tok (`int`, *optional*, defaults to 2):
+ The number of experts to route per token; can also be interpreted as the `top-k` routing
+ parameter.
+ num_local_experts (`int`, *optional*, defaults to 8):
+ Number of experts per Sparse MLP layer.
+ output_router_logits (`bool`, *optional*, defaults to `False`):
+ Whether or not the router logits should be returned by the model. Enabling this will also
+ allow the model to output the auxiliary loss. See [here]() for more details
+ router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
+ The aux loss factor for the total loss.
+ router_jitter_noise (`float`, *optional*, defaults to 0.0):
+ Amount of noise to add to the router.
+
+ ```python
+ >>> from transformers import MixtralModel, MixtralConfig
+
+ >>> # Initializing a Mixtral 8x7B style configuration
+ >>> configuration = MixtralConfig()
+
+ >>> # Initializing a model from the Mixtral 8x7B style configuration
+ >>> model = MixtralModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "mixtral"
+ keys_to_ignore_at_inference = ["past_key_values"]
+
+ def __init__(
+ self,
+ vocab_size=32000,
+ hidden_size=4096,
+ intermediate_size=14336,
+ num_hidden_layers=32,
+ num_attention_heads=32,
+ num_key_value_heads=8,
+ head_dim=None,
+ hidden_act="silu",
+ max_position_embeddings=4096 * 32,
+ initializer_range=0.02,
+ rms_norm_eps=1e-5,
+ use_cache=True,
+ pad_token_id=None,
+ bos_token_id=1,
+ eos_token_id=2,
+ tie_word_embeddings=False,
+ rope_theta=1e6,
+ sliding_window=None,
+ attention_dropout=0.0,
+ num_experts_per_tok=2,
+ num_local_experts=8,
+ output_router_logits=False,
+ router_aux_loss_coef=0.001,
+ router_jitter_noise=0.0,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.sliding_window = sliding_window
+
+ # for backward compatibility
+ if num_key_value_heads is None:
+ num_key_value_heads = num_attention_heads
+
+ self.num_key_value_heads = num_key_value_heads
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.rms_norm_eps = rms_norm_eps
+ self.use_cache = use_cache
+ self.rope_theta = rope_theta
+ self.attention_dropout = attention_dropout
+ self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
+
+ self.num_experts_per_tok = num_experts_per_tok
+ self.num_local_experts = num_local_experts
+ self.output_router_logits = output_router_logits
+ self.router_aux_loss_coef = router_aux_loss_coef
+ self.router_jitter_noise = router_jitter_noise
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ tie_word_embeddings=tie_word_embeddings,
+ **kwargs,
+ )
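+
+ # Note (illustrative, derived from the defaults above): `head_dim` resolves to
+ # hidden_size // num_attention_heads = 4096 // 32 = 128, and grouped-query attention shares
+ # each key/value head across num_attention_heads // num_key_value_heads = 32 // 8 = 4 query heads.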
diff --git a/janus/lib/python3.10/site-packages/transformers/models/mixtral/modular_mixtral.py b/janus/lib/python3.10/site-packages/transformers/models/mixtral/modular_mixtral.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6069f69b33421349237815d44fa3a69ede36697
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/mixtral/modular_mixtral.py
@@ -0,0 +1,574 @@
+# coding=utf-8
+# Copyright 2023 Mistral AI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch Mixtral model."""
+
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn.functional as F
+import torch.utils.checkpoint
+from torch import nn
+
+from ...activations import ACT2FN
+from ...cache_utils import DynamicCache
+from ...modeling_flash_attention_utils import FlashAttentionKwargs
+from ...modeling_outputs import (
+ MoeCausalLMOutputWithPast,
+ MoeModelOutputWithPast,
+)
+from ...processing_utils import Unpack
+from ...utils import (
+ LossKwargs,
+ logging,
+)
+from ..mistral.modeling_mistral import (
+ MistralAttention,
+ MistralForCausalLM,
+ MistralForQuestionAnswering,
+ MistralForSequenceClassification,
+ MistralForTokenClassification,
+ MistralModel,
+ MistralRMSNorm,
+)
+from .configuration_mixtral import MixtralConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "mistralai/Mixtral-8x7B-v0.1"
+_CONFIG_FOR_DOC = "MixtralConfig"
+
+
+def load_balancing_loss_func(
+ gate_logits: Union[torch.Tensor, Tuple[torch.Tensor], None],
+ num_experts: Optional[int] = None,
+ top_k=2,
+ attention_mask: Optional[torch.Tensor] = None,
+) -> Union[torch.Tensor, int]:
+ r"""
+ Computes auxiliary load balancing loss as in Switch Transformer - implemented in PyTorch.
+
+ See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
+ experts is too unbalanced.
+
+ Args:
+ gate_logits:
+ Logits from the `gate`; should be a tuple of `model.config.num_hidden_layers` tensors of
+ shape [batch_size * sequence_length, num_experts].
+ num_experts:
+ Number of experts
+ top_k:
+ The number of experts to route per-token, can be also interpreted as the `top-k` routing
+ parameter.
+ attention_mask (`torch.Tensor`, *optional*):
+ The attention_mask used in the forward function, of
+ shape [batch_size, sequence_length] if not None.
+
+ Returns:
+ The auxiliary loss.
+ """
+ if gate_logits is None or not isinstance(gate_logits, tuple):
+ return 0
+
+ if isinstance(gate_logits, tuple):
+ compute_device = gate_logits[0].device
+ concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
+
+ routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
+
+ _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
+
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
+
+ if attention_mask is None:
+ # Compute the percentage of tokens routed to each expert
+ tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
+
+ # Compute the average probability of routing to these experts
+ router_prob_per_expert = torch.mean(routing_weights, dim=0)
+ else:
+ batch_size, sequence_length = attention_mask.shape
+ num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
+
+ # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
+ expert_attention_mask = (
+ attention_mask[None, :, :, None, None]
+ .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
+ .reshape(-1, top_k, num_experts)
+ .to(compute_device)
+ )
+
+ # Compute the percentage of tokens routed to each expert
+ tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
+ expert_attention_mask, dim=0
+ )
+
+ # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
+ router_per_expert_attention_mask = (
+ attention_mask[None, :, :, None]
+ .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
+ .reshape(-1, num_experts)
+ .to(compute_device)
+ )
+
+ # Compute the average probability of routing to these experts
+ router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
+ router_per_expert_attention_mask, dim=0
+ )
+
+ overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
+ return overall_loss * num_experts
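+
+
+ # Usage sketch (illustrative): the function expects a tuple with one router-logits tensor per
+ # decoder layer, each of shape [batch_size * sequence_length, num_experts], e.g.
+ #   gate_logits = tuple(torch.randn(4 * 6, 8) for _ in range(2))  # 2 layers, batch 4, seq 6, 8 experts
+ #   aux_loss = load_balancing_loss_func(gate_logits, num_experts=8, top_k=2)
+ # Passing `None` (or anything that is not a tuple) returns 0, which is how the loss is skipped
+ # when router logits are not collected.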
+
+
+class MixtralBlockSparseTop2MLP(nn.Module):
+ def __init__(self, config: MixtralConfig):
+ super().__init__()
+ self.ffn_dim = config.intermediate_size
+ self.hidden_dim = config.hidden_size
+
+ self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
+ self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False)
+ self.w3 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
+
+ self.act_fn = ACT2FN[config.hidden_act]
+
+ def forward(self, hidden_states):
+ current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states)
+ current_hidden_states = self.w2(current_hidden_states)
+ return current_hidden_states
+
+
+class MixtralSparseMoeBlock(nn.Module):
+ """
+ This implementation is strictly equivalent to standard MoE with full capacity (no
+ dropped tokens). It is faster since it formulates MoE operations
+ in terms of block-sparse operations to accommodate imbalanced
+ assignments of tokens to experts, whereas standard MoE either
+ (1) drops tokens at the cost of reduced performance or (2) sets the
+ capacity factor to the number of experts and thus wastes computation
+ and memory on padding.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.hidden_dim = config.hidden_size
+ self.ffn_dim = config.intermediate_size
+ self.num_experts = config.num_local_experts
+ self.top_k = config.num_experts_per_tok
+
+ # gating
+ self.gate = nn.Linear(self.hidden_dim, self.num_experts, bias=False)
+
+ self.experts = nn.ModuleList([MixtralBlockSparseTop2MLP(config) for _ in range(self.num_experts)])
+
+ # Jitter parameters
+ self.jitter_noise = config.router_jitter_noise
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ """ """
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
+ if self.training and self.jitter_noise > 0:
+ hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
+ hidden_states = hidden_states.view(-1, hidden_dim)
+ # router_logits: (batch * sequence_length, n_experts)
+ router_logits = self.gate(hidden_states)
+
+ routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
+ routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
+ routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
+ # we cast back to the input dtype
+ routing_weights = routing_weights.to(hidden_states.dtype)
+
+ final_hidden_states = torch.zeros(
+ (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device
+ )
+
+ # One-hot encode the selected experts to create an expert mask;
+ # this will be used to easily index which expert is going to be solicited
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
+
+ # Loop over all available experts in the model and perform the computation on each expert
+ for expert_idx in range(self.num_experts):
+ expert_layer = self.experts[expert_idx]
+ idx, top_x = torch.where(expert_mask[expert_idx])
+
+ # Index the correct hidden states and compute the expert hidden state for
+ # the current expert. We need to make sure to multiply the output hidden
+ # states by `routing_weights` on the corresponding tokens (top-1 and top-2)
+ current_state = hidden_states[None, top_x].reshape(-1, hidden_dim)
+ current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None]
+
+ # However `index_add_` only supports torch tensors for indexing so we'll use
+ # the `top_x` tensor here.
+ final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
+ final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
+ return final_hidden_states, router_logits
+
+
+class MixtralRMSNorm(MistralRMSNorm):
+ pass
+
+
+class MixtralAttention(MistralAttention):
+ pass
+
+
+class MixtralDecoderLayer(nn.Module):
+ def __init__(self, config: MixtralConfig, layer_idx: int):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+
+ self.self_attn = MixtralAttention(config, layer_idx)
+
+ self.block_sparse_moe = MixtralSparseMoeBlock(config)
+ self.input_layernorm = MixtralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.post_attention_layernorm = MixtralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ output_router_logits: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
+ **kwargs: Unpack[FlashAttentionKwargs],
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+ `(batch, sequence_length)` where padding elements are indicated by 0.
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_router_logits (`bool`, *optional*):
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
+ should not be returned during inference.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
+ Indices depicting the position of the input sequence tokens in the sequence.
+ kwargs (`dict`, *optional*):
+ Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
+ into the model.
+ """
+
+ residual = hidden_states
+
+ hidden_states = self.input_layernorm(hidden_states)
+
+ # Self Attention
+ hidden_states, self_attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ position_embeddings=position_embeddings,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ **kwargs,
+ )
+ hidden_states = residual + hidden_states
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.post_attention_layernorm(hidden_states)
+ hidden_states, router_logits = self.block_sparse_moe(hidden_states)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ if output_router_logits:
+ outputs += (router_logits,)
+
+ return outputs
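+
+ # Output layout note (descriptive): index 0 is always `hidden_states`; attention weights follow
+ # when `output_attentions=True`, and `router_logits` is appended last when
+ # `output_router_logits=True`, which is why MixtralModel reads `layer_outputs[-1]` for the
+ # router logits.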
+
+
+class MixtralModel(MistralModel):
+ def __init__(self, config: MixtralConfig):
+ super().__init__(config)
+ self.layers = nn.ModuleList(
+ [MixtralDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+ )
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_router_logits: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
+ ) -> Union[Tuple, MoeModelOutputWithPast]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_router_logits = (
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if (input_ids is None) ^ (inputs_embeds is not None):
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ if use_cache and past_key_values is None:
+ past_key_values = DynamicCache()
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ if cache_position is None:
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+ cache_position = torch.arange(
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+ )
+ if position_ids is None:
+ position_ids = cache_position.unsqueeze(0)
+
+ causal_mask = self._update_causal_mask(
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
+ )
+
+ hidden_states = inputs_embeds
+
+ # create position embeddings to be shared across the decoder layers
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_router_logits = () if output_router_logits else None
+
+ for decoder_layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ causal_mask,
+ position_ids,
+ past_key_values,
+ output_attentions,
+ output_router_logits,
+ use_cache,
+ cache_position,
+ position_embeddings,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=causal_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_values,
+ output_attentions=output_attentions,
+ output_router_logits=output_router_logits,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ position_embeddings=position_embeddings,
+ **flash_attn_kwargs,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if output_router_logits:
+ all_router_logits += (layer_outputs[-1],)
+
+ hidden_states = self.norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ output = MoeModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=past_key_values,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ router_logits=all_router_logits,
+ )
+ return output if return_dict else output.to_tuple()
+
+
+class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
+
+
+class MixtralForCausalLM(MistralForCausalLM):
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.model = MixtralModel(config)
+ self.router_aux_loss_coef = config.router_aux_loss_coef
+ self.num_experts = config.num_local_experts
+ self.num_experts_per_tok = config.num_experts_per_tok
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_router_logits: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ num_logits_to_keep: int = 0,
+ **kwargs: Unpack[KwargsForCausalLM],
+ ) -> Union[Tuple, MoeCausalLMOutputWithPast]:
+ r"""
+ Args:
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ num_logits_to_keep (`int`, *optional*):
+ Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
+ `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
+ token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, MixtralForCausalLM
+
+ >>> model = MixtralForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
+ >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
+
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+ >>> # Generate
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_router_logits = (
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
+ )
+
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ output_router_logits=output_router_logits,
+ return_dict=return_dict,
+ cache_position=cache_position,
+ **kwargs,
+ )
+
+ hidden_states = outputs[0]
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+ logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])
+
+ loss = None
+ if labels is not None:
+ loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
+
+ aux_loss = None
+ if output_router_logits:
+ aux_loss = load_balancing_loss_func(
+ outputs.router_logits if return_dict else outputs[-1],
+ self.num_experts,
+ self.num_experts_per_tok,
+ attention_mask,
+ )
+ if labels is not None:
+ loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure it resides on the same device
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ if output_router_logits:
+ output = (aux_loss,) + output
+ return (loss,) + output if loss is not None else output
+
+ return MoeCausalLMOutputWithPast(
+ loss=loss,
+ aux_loss=aux_loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ router_logits=outputs.router_logits,
+ )
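+
+ # Training note (illustrative): passing `labels` together with `output_router_logits=True` adds
+ # `router_aux_loss_coef * aux_loss` to the language-modeling loss, e.g.
+ #   out = model(input_ids, labels=input_ids, output_router_logits=True)
+ #   out.loss.backward()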
+
+
+class MixtralForSequenceClassification(MistralForSequenceClassification):
+ pass
+
+
+class MixtralForTokenClassification(MistralForTokenClassification):
+ pass
+
+
+class MixtralForQuestionAnswering(MistralForQuestionAnswering):
+ pass
diff --git a/janus/lib/python3.10/site-packages/transformers/models/musicgen/__init__.py b/janus/lib/python3.10/site-packages/transformers/models/musicgen/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..880274309cbab486a90df05548a0e4d3f2ea0925
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/musicgen/__init__.py
@@ -0,0 +1,28 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+ from .configuration_musicgen import *
+ from .modeling_musicgen import *
+ from .processing_musicgen import *
+else:
+ import sys
+
+ _file = globals()["__file__"]
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/janus/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..de368741bab0cf89605e3b0340fa8c246c0ff0fe
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/configuration_musicgen.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/configuration_musicgen.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..485cf26f1a5a2a3531bcfbe31876f4a29a7de427
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/configuration_musicgen.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/modeling_musicgen.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/modeling_musicgen.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..20215056bc00761779dadb2ca0a0cd92a6d6787d
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/modeling_musicgen.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/processing_musicgen.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/processing_musicgen.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a2a9569a2fd0163204cc36df0b4d8d13fd5493de
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/processing_musicgen.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/musicgen/configuration_musicgen.py b/janus/lib/python3.10/site-packages/transformers/models/musicgen/configuration_musicgen.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c38caf20dc4136e794e5762bf0b58847817dd19
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/musicgen/configuration_musicgen.py
@@ -0,0 +1,247 @@
+# coding=utf-8
+# Copyright 2023 Meta AI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""MusicGen model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+from ..auto.configuration_auto import AutoConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+class MusicgenDecoderConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`MusicgenDecoder`]. It is used to instantiate a
+ MusicGen decoder according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the MusicGen
+ [facebook/musicgen-small](https://huggingface.co/facebook/musicgen-small) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 2048):
+ Vocabulary size of the MusicgenDecoder model. Defines the number of different tokens that can be
+ represented by the `input_ids` passed when calling [`MusicgenDecoder`].
+ hidden_size (`int`, *optional*, defaults to 1024):
+ Dimensionality of the layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 24):
+ Number of decoder layers.
+ num_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer block.
+ ffn_dim (`int`, *optional*, defaults to 4096):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer block.
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the decoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, text_encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for activations inside the fully connected layer.
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
+ The maximum sequence length that this model might ever be used with. Typically, set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ initializer_factor (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+ for more details.
+ scale_embedding (`bool`, *optional*, defaults to `False`):
+ Scale embeddings by dividing by sqrt(hidden_size).
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether the model should return the last key/values attentions (not used by all models)
+ num_codebooks (`int`, *optional*, defaults to 4):
+ The number of parallel codebooks forwarded to the model.
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+ Whether input and output word embeddings should be tied.
+ audio_channels (`int`, *optional*, defaults to 1):
+ Number of channels in the audio data. Either 1 for mono or 2 for stereo. Stereo models generate a separate
+ audio stream for the left/right output channels. Mono models generate a single audio stream output.
+ """
+
+ model_type = "musicgen_decoder"
+ base_config_key = "decoder_config"
+ keys_to_ignore_at_inference = ["past_key_values"]
+
+ def __init__(
+ self,
+ vocab_size=2048,
+ max_position_embeddings=2048,
+ num_hidden_layers=24,
+ ffn_dim=4096,
+ num_attention_heads=16,
+ layerdrop=0.0,
+ use_cache=True,
+ activation_function="gelu",
+ hidden_size=1024,
+ dropout=0.1,
+ attention_dropout=0.0,
+ activation_dropout=0.0,
+ initializer_factor=0.02,
+ scale_embedding=False,
+ num_codebooks=4,
+ audio_channels=1,
+ pad_token_id=2048,
+ bos_token_id=2048,
+ eos_token_id=None,
+ tie_word_embeddings=False,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ self.ffn_dim = ffn_dim
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.activation_function = activation_function
+ self.initializer_factor = initializer_factor
+ self.layerdrop = layerdrop
+ self.use_cache = use_cache
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
+ self.num_codebooks = num_codebooks
+
+ if audio_channels not in [1, 2]:
+ raise ValueError(f"Expected 1 (mono) or 2 (stereo) audio channels, got {audio_channels} channels.")
+ self.audio_channels = audio_channels
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ tie_word_embeddings=tie_word_embeddings,
+ **kwargs,
+ )
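+
+ # Usage sketch (illustrative): a stereo, 8-codebook decoder configuration could be built as
+ #   MusicgenDecoderConfig(audio_channels=2, num_codebooks=8)
+ # any `audio_channels` value other than 1 or 2 raises the ValueError above.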
+
+
+class MusicgenConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`MusicgenModel`]. It is used to instantiate a
+ MusicGen model according to the specified arguments, defining the text encoder, audio encoder and MusicGen decoder
+ configs.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ kwargs (*optional*):
+ Dictionary of keyword arguments. Notably:
+
+ - **text_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that
+ defines the text encoder config.
+ - **audio_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that
+ defines the audio encoder config.
+ - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
+ the decoder config.
+
+ Example:
+
+ ```python
+ >>> from transformers import (
+ ... MusicgenConfig,
+ ... MusicgenDecoderConfig,
+ ... T5Config,
+ ... EncodecConfig,
+ ... MusicgenForConditionalGeneration,
+ ... )
+
+ >>> # Initializing text encoder, audio encoder, and decoder model configurations
+ >>> text_encoder_config = T5Config()
+ >>> audio_encoder_config = EncodecConfig()
+ >>> decoder_config = MusicgenDecoderConfig()
+
+ >>> configuration = MusicgenConfig.from_sub_models_config(
+ ... text_encoder_config, audio_encoder_config, decoder_config
+ ... )
+
+ >>> # Initializing a MusicgenForConditionalGeneration (with random weights) from the facebook/musicgen-small style configuration
+ >>> model = MusicgenForConditionalGeneration(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ >>> config_text_encoder = model.config.text_encoder
+ >>> config_audio_encoder = model.config.audio_encoder
+ >>> config_decoder = model.config.decoder
+
+ >>> # Saving the model, including its configuration
+ >>> model.save_pretrained("musicgen-model")
+
+ >>> # loading model and config from pretrained folder
+ >>> musicgen_config = MusicgenConfig.from_pretrained("musicgen-model")
+ >>> model = MusicgenForConditionalGeneration.from_pretrained("musicgen-model", config=musicgen_config)
+ ```"""
+
+ model_type = "musicgen"
+ sub_configs = {
+ "text_encoder": AutoConfig,
+ "audio_encoder": AutoConfig,
+ "decoder": MusicgenDecoderConfig,
+ }
+ is_composition = True
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ if "text_encoder" not in kwargs or "audio_encoder" not in kwargs or "decoder" not in kwargs:
+ raise ValueError("Config has to be initialized with text_encoder, audio_encoder and decoder config")
+
+ text_encoder_config = kwargs.pop("text_encoder")
+ text_encoder_model_type = text_encoder_config.pop("model_type")
+
+ audio_encoder_config = kwargs.pop("audio_encoder")
+ audio_encoder_model_type = audio_encoder_config.pop("model_type")
+
+ decoder_config = kwargs.pop("decoder")
+
+ self.text_encoder = AutoConfig.for_model(text_encoder_model_type, **text_encoder_config)
+ self.audio_encoder = AutoConfig.for_model(audio_encoder_model_type, **audio_encoder_config)
+ self.decoder = MusicgenDecoderConfig(**decoder_config)
+ self.is_encoder_decoder = True
+
+ @classmethod
+ def from_sub_models_config(
+ cls,
+ text_encoder_config: PretrainedConfig,
+ audio_encoder_config: PretrainedConfig,
+ decoder_config: MusicgenDecoderConfig,
+ **kwargs,
+ ):
+ r"""
+ Instantiate a [`MusicgenConfig`] (or a derived class) from text encoder, audio encoder and decoder
+ configurations.
+
+ Returns:
+ [`MusicgenConfig`]: An instance of a configuration object
+ """
+
+ return cls(
+ text_encoder=text_encoder_config.to_dict(),
+ audio_encoder=audio_encoder_config.to_dict(),
+ decoder=decoder_config.to_dict(),
+ **kwargs,
+ )
+
+ @property
+ # This is a property because you might want to change the codec model on the fly
+ def sampling_rate(self):
+ return self.audio_encoder.sampling_rate
+
+
+__all__ = ["MusicgenConfig", "MusicgenDecoderConfig"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/musicgen/modeling_musicgen.py b/janus/lib/python3.10/site-packages/transformers/models/musicgen/modeling_musicgen.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea5ff3a11c1185ee44434e4414bb96512883eae1
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/musicgen/modeling_musicgen.py
@@ -0,0 +1,2755 @@
+# coding=utf-8
+# Copyright 2023 Meta AI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch Musicgen model."""
+
+import copy
+import inspect
+import math
+import random
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...generation import (
+ ClassifierFreeGuidanceLogitsProcessor,
+ GenerationConfig,
+ GenerationMixin,
+ GenerationMode,
+ LogitsProcessorList,
+ StoppingCriteriaList,
+)
+from ...modeling_attn_mask_utils import (
+ _prepare_4d_attention_mask,
+ _prepare_4d_attention_mask_for_sdpa,
+ _prepare_4d_causal_attention_mask,
+ _prepare_4d_causal_attention_mask_for_sdpa,
+)
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ CausalLMOutputWithCrossAttentions,
+ ModelOutput,
+ Seq2SeqLMOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_flash_attn_2_available,
+ is_flash_attn_greater_or_equal_2_10,
+ logging,
+ replace_return_docstrings,
+)
+from ..auto.configuration_auto import AutoConfig
+from ..auto.modeling_auto import AutoModel
+from .configuration_musicgen import MusicgenConfig, MusicgenDecoderConfig
+
+
+if is_flash_attn_2_available():
+ from ...modeling_flash_attention_utils import _flash_attention_forward
+
+if TYPE_CHECKING:
+ from ...generation.streamers import BaseStreamer
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "MusicgenConfig"
+_CHECKPOINT_FOR_DOC = "facebook/musicgen-small"
+
+
+@dataclass
+class MusicgenUnconditionalInput(ModelOutput):
+ """
+ Args:
+ encoder_outputs (`Tuple[torch.FloatTensor]` of length 1, with tensor shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the text encoder model.
+ attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Encoder attention mask to avoid performing attention on padding token indices. Mask values selected in `[0,
+ 1]`: 1 for tokens that are **not masked**, 0 for tokens that are **masked**.
+ guidance_scale (`float`, *optional*):
+ Guidance scale for classifier free guidance, setting the balance between the conditional logits (predicted
+ from the prompts) and the unconditional logits (predicted without prompts).
+ """
+
+ encoder_outputs: Tuple[torch.FloatTensor] = None
+ attention_mask: torch.LongTensor = None
+ guidance_scale: float = None
+
+
+def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
+ """
+ Shift input ids one token to the right.
+ """
+ # transpose to get (bsz, num_codebooks, seq_len)
+ input_ids = input_ids.transpose(1, 2)
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
+ if decoder_start_token_id is None:
+ raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.")
+ shifted_input_ids[..., 0] = decoder_start_token_id
+
+ if pad_token_id is None:
+ raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.")
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
+
+ return shifted_input_ids
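+
+
+ # Shape note (illustrative): `input_ids` is expected as (batch, seq_len, num_codebooks); after the
+ # transpose above the shift happens along the time axis of each codebook stream, so labels of
+ # shape (2, 10, 4) come back as (2, 4, 10) with `decoder_start_token_id` in position 0.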
+
+
+class MusicgenSinusoidalPositionalEmbedding(nn.Module):
+ """This module produces sinusoidal positional embeddings of any length."""
+
+ def __init__(self, num_positions: int, embedding_dim: int):
+ super().__init__()
+ self.embedding_dim = embedding_dim
+ self.make_weights(num_positions, embedding_dim)
+
+ def make_weights(self, num_embeddings: int, embedding_dim: int):
+ emb_weights = self.get_embedding(num_embeddings, embedding_dim)
+ if hasattr(self, "weights"):
+ # in forward put the weights on the correct dtype and device of the param
+ emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
+
+ self.weights = nn.Parameter(emb_weights)
+ self.weights.requires_grad = False
+ self.weights.detach_()
+
+ @staticmethod
+ def get_embedding(num_embeddings: int, embedding_dim: int):
+ """
+ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the
+ description in Section 3.5 of "Attention Is All You Need".
+ """
+ half_dim = embedding_dim // 2
+ emb = math.log(10000) / (half_dim - 1)
+ emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
+ emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
+ emb = torch.cat([torch.cos(emb), torch.sin(emb)], dim=1).view(num_embeddings, -1)
+ if embedding_dim % 2 == 1:
+ # zero pad
+ emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
+ return emb.to(torch.get_default_dtype())
+
+ @torch.no_grad()
+ def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
+ bsz, codebooks, seq_len = input_ids.size()
+ # Create the position ids from the input token ids.
+ position_ids = (torch.arange(seq_len) + past_key_values_length).to(input_ids.device)
+ # expand embeddings if needed
+ if seq_len > self.weights.size(0):
+ self.make_weights(seq_len + self.offset, self.embedding_dim)
+ return self.weights.index_select(0, position_ids.view(-1)).detach()
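+
+ # Note (descriptive): positions depend only on the time axis, so the returned embeddings of shape
+ # (seq_len, embedding_dim) are shared across the codebook dimension of `input_ids`.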
+
+
+# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Musicgen
+class MusicgenAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ is_causal: bool = False,
+ config: Optional[MusicgenConfig] = None,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+ self.config = config
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+ self.is_causal = is_causal
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `key_value_states` to support prefix tuning
+ if (
+ is_cross_attention
+ and past_key_value is not None
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
+ ):
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.reshape(*proj_shape)
+ value_states = value_states.reshape(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.size() != (self.num_heads,):
+ raise ValueError(
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+ # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
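+
+ # Cache layout note (descriptive): when `is_decoder=True`, `past_key_value` stores key/value
+ # states of shape (batch, num_heads, seq_len, head_dim), which the flash-attention variant below
+ # transposes to (batch, seq_len, num_heads, head_dim) before calling the kernel.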
+
+
+# Copied from transformers.models.bart.modeling_bart.BartFlashAttention2 with Bart->Musicgen
+class MusicgenFlashAttention2(MusicgenAttention):
+ """
+ Musicgen flash attention module. This module inherits from `MusicgenAttention`, as the weights of the module stay
+ untouched. The only required change is in the forward pass, where it needs to correctly call the public API of
+ flash attention and deal with padding tokens in case the input contains any of them.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
+
+ def _reshape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ # MusicgenFlashAttention2 attention does not support output_attentions
+ if output_attentions:
+ raise ValueError("MusicgenFlashAttention2 attention does not support output_attentions")
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, q_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self._reshape(self.q_proj(hidden_states), -1, bsz)
+ # get key, value proj
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `key_value_states` to support prefix tuning
+ if (
+ is_cross_attention
+ and past_key_value is not None
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
+ ):
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0].transpose(1, 2)
+ value_states = past_key_value[1].transpose(1, 2)
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._reshape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._reshape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._reshape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._reshape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0].transpose(1, 2), key_states], dim=1)
+ value_states = torch.cat([past_key_value[1].transpose(1, 2), value_states], dim=1)
+ else:
+ # self_attention
+ key_states = self._reshape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._reshape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states.transpose(1, 2), value_states.transpose(1, 2))
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ kv_seq_len += past_key_value[0].shape[-2]
+
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
+ # cast them back in the correct dtype just to be sure everything works as expected.
+ # This might slow down training & inference so it is recommended to not cast the LayerNorms
+ # in fp32. (LlamaRMSNorm handles it correctly)
+
+ input_dtype = query_states.dtype
+ if input_dtype == torch.float32:
+ if torch.is_autocast_enabled():
+ target_dtype = torch.get_autocast_gpu_dtype()
+ # Handle the case where the model is quantized
+ elif hasattr(self.config, "_pre_quantization_dtype"):
+ target_dtype = self.config._pre_quantization_dtype
+ else:
+ target_dtype = self.q_proj.weight.dtype
+
+ logger.warning_once(
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
+ f" {target_dtype}."
+ )
+
+ query_states = query_states.to(target_dtype)
+ key_states = key_states.to(target_dtype)
+ value_states = value_states.to(target_dtype)
+
+ attn_output = _flash_attention_forward(
+ query_states,
+ key_states,
+ value_states,
+ attention_mask,
+ q_len,
+ dropout=self.dropout if self.training else 0.0,
+ is_causal=self.is_causal,
+ use_top_left_mask=self._flash_attn_uses_top_left_mask,
+ )
+
+ attn_output = attn_output.reshape(bsz, q_len, -1)
+ attn_output = self.out_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+
+class MusicgenSdpaAttention(MusicgenAttention):
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+ if output_attentions or layer_head_mask is not None:
+ # TODO: Improve this warning with e.g. `model.config._attn_implementation = "manual"` once this is implemented.
+ logger.warning_once(
+ "MusicgenModel is using MusicgenSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` or `layer_head_mask` not None. Falling back to the manual attention"
+ ' implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+ )
+ return super().forward(
+ hidden_states,
+ key_value_states=key_value_states,
+ past_key_value=past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+
+ if (
+ attention_mask is not None
+ and (attention_mask.mean(dim=[1, 2, 3]) <= torch.finfo(attention_mask.dtype).min).any()
+ ):
+ logger.warning_once(
+ '`torch.nn.functional.scaled_dot_product_attention` does not support having an empty attention mask. Falling back to the manual attention implementation. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+ "Note that this probably happens because `guidance_scale>1` or because you used `get_unconditional_inputs`. See https://github.com/huggingface/transformers/issues/31189 for more information."
+ )
+ return super().forward(
+ hidden_states,
+ key_value_states=key_value_states,
+ past_key_value=past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states)
+ # get key, value proj
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `key_value_states` to support prefix tuning
+ if (
+ is_cross_attention
+ and past_key_value is not None
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
+ ):
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ query_states = self._shape(query_states, tgt_len, bsz)
+
+ # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
+ # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
+ # The tgt_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case tgt_len == 1.
+ is_causal = True if self.is_causal and attention_mask is None and tgt_len > 1 else False
+
+ # NOTE: SDPA with memory-efficient backend is currently (torch==2.1.2) bugged when using non-contiguous inputs and a custom attn_mask,
+ # but we are fine here as `_shape` do call `.contiguous()`. Reference: https://github.com/pytorch/pytorch/issues/112577
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
+ query_states,
+ key_states,
+ value_states,
+ attn_mask=attention_mask,
+ dropout_p=self.dropout if self.training else 0.0,
+ is_causal=is_causal,
+ )
+
+ if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.transpose(1, 2)
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+ # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, None, past_key_value
+
+
+MUSICGEN_ATTENTION_CLASSES = {
+ "eager": MusicgenAttention,
+ "sdpa": MusicgenSdpaAttention,
+ "flash_attention_2": MusicgenFlashAttention2,
+}
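+# Minimal usage sketch (illustrative only, not part of the upstream file): the key into this mapping comes
+# from `config._attn_implementation`, which callers typically set when loading the model, e.g.
+#
+#     model = MusicgenForConditionalGeneration.from_pretrained(
+#         "facebook/musicgen-small",  # checkpoint name assumed for illustration
+#         attn_implementation="sdpa",
+#     )
+#
+# Each decoder layer below then instantiates the resolved attention class.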
+
+
+class MusicgenDecoderLayer(nn.Module):
+ def __init__(self, config: MusicgenDecoderConfig):
+ super().__init__()
+ self.embed_dim = config.hidden_size
+
+ self.self_attn = MUSICGEN_ATTENTION_CLASSES[config._attn_implementation](
+ embed_dim=self.embed_dim,
+ num_heads=config.num_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ bias=False,
+ is_causal=True,
+ config=config,
+ )
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.encoder_attn = MUSICGEN_ATTENTION_CLASSES[config._attn_implementation](
+ self.embed_dim,
+ config.num_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ bias=False,
+ config=config,
+ )
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim, bias=False)
+ self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim, bias=False)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ # Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer.forward
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = True,
+ ) -> torch.Tensor:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ encoder_hidden_states (`torch.FloatTensor`):
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`.
+ cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
+ size `(decoder_attention_heads,)`.
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights, cross_attn_weights)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+class MusicgenPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = MusicgenDecoderConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["MusicgenDecoderLayer", "MusicgenAttention"]
+ _supports_flash_attn_2 = True
+ _supports_sdpa = True
+
+ def _init_weights(self, module):
+ std = self.config.initializer_factor
+ if isinstance(module, (nn.Linear, nn.Conv1d)):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+
+MUSICGEN_START_DOCSTRING = r"""
+
+ The Musicgen model was proposed in [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by
+ Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi, Alexandre Défossez. It is an
+ encoder-decoder transformer trained on the task of conditional music generation.
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`MusicgenConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+MUSICGEN_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary, corresponding to the sequence of audio codes.
+
+ Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes,
+ such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ The `decoder_input_ids` will automatically be converted from shape `(batch_size * num_codebooks,
+ target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If
+ you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of
+ frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks,
+ target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as
+ `decoder_input_ids`.
+
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
+ 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*, is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
+ input (see `past_key_values`). This is useful if you want more control over how to convert
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
+
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
+ of `inputs_embeds`.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks)`, *optional*):
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+MUSICGEN_DECODER_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary, corresponding to the sequence of audio codes.
+
+ Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes,
+ such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+
+ The `input_ids` will automatically be converted from shape `(batch_size * num_codebooks,
+ target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If
+ you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of
+ frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks,
+ target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as
+ `input_ids`.
+
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of
+ the decoder.
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
+ cross-attention on hidden heads. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class MusicgenDecoder(MusicgenPreTrainedModel):
+ """
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MusicgenDecoderLayer`]
+ """
+
+ def __init__(self, config: MusicgenDecoderConfig):
+ super().__init__(config)
+ self.dropout = config.dropout
+ self.layerdrop = config.layerdrop
+ self.max_target_positions = config.max_position_embeddings
+ self.d_model = config.hidden_size
+ self.num_codebooks = config.num_codebooks
+ self.embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0
+
+ embed_dim = config.vocab_size + 1
+ self.embed_tokens = nn.ModuleList(
+ [nn.Embedding(embed_dim, config.hidden_size) for _ in range(config.num_codebooks)]
+ )
+
+ self.embed_positions = MusicgenSinusoidalPositionalEmbedding(
+ config.max_position_embeddings,
+ config.hidden_size,
+ )
+
+ self.layers = nn.ModuleList([MusicgenDecoderLayer(config) for _ in range(config.num_hidden_layers)])
+ self.layer_norm = nn.LayerNorm(config.hidden_size)
+ self.attn_implementation = config._attn_implementation
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(MUSICGEN_DECODER_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ # (bsz * codebooks, seq_len) -> (bsz, codebooks, seq_len)
+ input = input_ids.reshape(-1, self.num_codebooks, input_ids.shape[-1])
+ bsz, num_codebooks, seq_len = input.shape
+ input_shape = (bsz, seq_len)
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ input = inputs_embeds[:, :, -1:]
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
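+ # Each codebook has its own embedding table; the per-codebook embeddings are summed into a single
+ # hidden-state sequence (at this point `input` has shape (bsz, num_codebooks, seq_len)).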
+ if inputs_embeds is None:
+ inputs_embeds = sum([self.embed_tokens[codebook](input[:, codebook]) for codebook in range(num_codebooks)])
+
+ if self.attn_implementation == "flash_attention_2":
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
+ elif self.attn_implementation == "sdpa" and head_mask is None and not output_attentions:
+ # output_attentions=True & cross_attn_head_mask can not be supported when using SDPA, and we fall back on
+ # the manual implementation that requires a 4D causal mask in all cases.
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
+ attention_mask,
+ input_shape,
+ inputs_embeds,
+ past_key_values_length,
+ )
+ else:
+ attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
+ )
+
+ # expand encoder attention mask
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ if self.attn_implementation == "flash_attention_2":
+ encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None
+ elif self.attn_implementation == "sdpa" and cross_attn_head_mask is None and not output_attentions:
+ # output_attentions=True & cross_attn_head_mask can not be supported when using SDPA, and we fall back on
+ # the manual implementation that requires a 4D causal mask in all cases.
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(
+ encoder_attention_mask,
+ inputs_embeds.dtype,
+ tgt_len=input_shape[-1],
+ )
+ else:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _prepare_4d_attention_mask(
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
+ )
+
+ # embed positions
+ positions = self.embed_positions(input, past_key_values_length)
+
+ hidden_states = inputs_embeds + positions.to(inputs_embeds.device)
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing`. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+ next_decoder_cache = () if use_cache else None
+
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
+ if attn_mask is not None:
+ if attn_mask.size()[0] != len(self.layers):
+ raise ValueError(
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
+ f" {attn_mask.size()[0]}."
+ )
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ dropout_probability = random.uniform(0, 1)
+ if self.training and (dropout_probability < self.layerdrop):
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.forward,
+ hidden_states,
+ attention_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
+ None,
+ output_attentions,
+ use_cache,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ cross_attn_layer_head_mask=(
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
+ ),
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ hidden_states = layer_outputs[0]
+
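+ # `layer_outputs` is (hidden_states, self_attn_weights, cross_attn_weights[, present_key_value]) when
+ # `output_attentions=True`, otherwise (hidden_states[, present_key_value]); hence the index switch below.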
+ if use_cache:
+ next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions += (layer_outputs[2],)
+
+ hidden_states = self.layer_norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ "The bare Musicgen decoder model outputting raw hidden-states without any specific head on top.",
+ MUSICGEN_START_DOCSTRING,
+)
+class MusicgenModel(MusicgenPreTrainedModel):
+ def __init__(self, config: MusicgenDecoderConfig):
+ super().__init__(config)
+ self.decoder = MusicgenDecoder(config)
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.decoder.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.decoder.embed_tokens = value
+
+ def get_decoder(self):
+ return self.decoder
+
+ @add_start_docstrings_to_model_forward(MUSICGEN_DECODER_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.decoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ encoder_attention_mask=encoder_attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ head_mask=head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs
+
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ hidden_states=decoder_outputs.hidden_states,
+ attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ "The MusicGen decoder model with a language modelling head on top.",
+ MUSICGEN_START_DOCSTRING,
+)
+class MusicgenForCausalLM(MusicgenPreTrainedModel, GenerationMixin):
+ def __init__(self, config: MusicgenDecoderConfig):
+ super().__init__(config)
+
+ self.model = MusicgenModel(config)
+
+ self.num_codebooks = config.num_codebooks
+ self.lm_heads = nn.ModuleList(
+ [nn.Linear(config.hidden_size, config.vocab_size, bias=False) for _ in range(config.num_codebooks)]
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.decoder.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.decoder.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.lm_heads
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_heads = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.model.decoder = decoder
+
+ def get_decoder(self):
+ return self.model.decoder
+
+ @add_start_docstrings_to_model_forward(MUSICGEN_DECODER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks)`, *optional*):
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
+ Returns:
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if (labels is not None) and (input_ids is None and inputs_embeds is None):
+ input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.bos_token_id)
+
+ outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ head_mask=head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+
+ lm_logits = torch.stack([head(hidden_states) for head in self.lm_heads], dim=1)
+
+ loss = None
+ if labels is not None:
+ # since encoder hidden states have been concatenated to the decoder hidden states,
+ # we take the last timestamps corresponding to labels
+ logits = lm_logits[:, :, -labels.shape[1] :]
+
+ loss_fct = CrossEntropyLoss()
+ loss = torch.zeros([], device=self.device)
+
+ # per codebook cross-entropy
+ # -100 labels are ignored
+ labels = labels.masked_fill(labels == self.config.pad_token_id, -100)
+
+ # per codebook cross-entropy
+ # ref: https://github.com/facebookresearch/audiocraft/blob/69fea8b290ad1b4b40d28f92d1dfc0ab01dbab85/audiocraft/solvers/musicgen.py#L242-L243
+ for codebook in range(self.config.num_codebooks):
+ codebook_logits = logits[:, codebook].contiguous().view(-1, logits.shape[-1])
+ codebook_labels = labels[..., codebook].contiguous().view(-1)
+ loss += loss_fct(codebook_logits, codebook_labels)
+
+ loss = loss / self.config.num_codebooks
+
+ # (bsz, num_codebooks, seq_len, vocab_size) -> (bsz * num_codebooks, seq_len, vocab_size)
+ lm_logits = lm_logits.reshape(-1, *lm_logits.shape[2:])
+
+ if not return_dict:
+ output = (lm_logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return CausalLMOutputWithCrossAttentions(
+ loss=loss,
+ logits=lm_logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ input_ids,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ head_mask=None,
+ cross_attn_head_mask=None,
+ past_key_values=None,
+ use_cache=True,
+ delay_pattern_mask=None,
+ guidance_scale=None,
+ **kwargs,
+ ):
+ # Overwritten -- MusicGen has custom processing
+ if delay_pattern_mask is None:
+ input_ids, delay_pattern_mask = self.build_delay_pattern_mask(
+ input_ids,
+ pad_token_id=self.generation_config.pad_token_id,
+ max_length=self.generation_config.max_length,
+ )
+
+ # apply the delay pattern mask
+ input_ids = self.apply_delay_pattern_mask(input_ids, delay_pattern_mask)
+
+ if guidance_scale is not None and guidance_scale > 1:
+ # for classifier free guidance we need to replicate the decoder args across the batch dim (we'll split these
+ # before sampling)
+ input_ids = input_ids.repeat((2, 1))
+ if attention_mask is not None:
+ attention_mask = attention_mask.repeat((2, 1))
+
+ if past_key_values is not None:
+ input_ids = input_ids[:, -1:]
+
+ return {
+ "input_ids": input_ids,
+ "attention_mask": attention_mask,
+ "encoder_hidden_states": encoder_hidden_states,
+ "encoder_attention_mask": encoder_attention_mask,
+ "head_mask": head_mask,
+ "cross_attn_head_mask": cross_attn_head_mask,
+ "past_key_values": past_key_values,
+ "use_cache": use_cache,
+ }
+
+ def build_delay_pattern_mask(self, input_ids: torch.LongTensor, pad_token_id: int, max_length: int = None):
+ """Build a delayed pattern mask to the input_ids. Each codebook is offset by the previous codebook by
+ one, giving a delayed pattern mask at the start of sequence and end of sequence. Take the example where there
+ are 4 codebooks and a max sequence length of 8, we have the delayed pattern mask of shape `(codebooks,
+ seq_len)`:
+ - [P, -1, -1, -1, -1, P, P, P]
+ - [P, P, -1, -1, -1, -1, P, P]
+ - [P, P, P, -1, -1, -1, -1, P]
+ - [P, P, P, P, -1, -1, -1, -1]
+ where P is the special padding token id and -1 indicates that the token is valid for prediction. If we include
+ a prompt (decoder input ids), the -1 positions indicate where new tokens should be predicted. Otherwise, the
+ mask is set to the value in the prompt:
+ - [P, a, b, -1, -1, P, P, P]
+ - [P, P, c, d, -1, -1, P, P]
+ - [P, P, P, e, f, -1, -1, P]
+ - [P, P, P, P, g, h, -1, -1]
+ where a-h indicate the input prompt (decoder input ids) that are offset by 1. Now, we only override the -1
+ tokens in our prediction.
+ """
+ # (bsz * num_codebooks, seq_len) -> (bsz, num_codebooks, seq_len)
+ input_ids = input_ids.reshape(-1, self.num_codebooks, input_ids.shape[-1])
+ bsz, num_codebooks, seq_len = input_ids.shape
+
+ max_length = max_length if max_length is not None else self.generation_config.max_length
+ input_ids_shifted = (
+ torch.ones((bsz, num_codebooks, max_length), dtype=torch.long, device=input_ids.device) * -1
+ )
+
+ channel_codebooks = num_codebooks // 2 if self.config.audio_channels == 2 else num_codebooks
+ # we only apply the mask if we have a large enough seq len - otherwise we return as is
+ if max_length < 2 * channel_codebooks - 1:
+ return input_ids.reshape(bsz * num_codebooks, -1), input_ids_shifted.reshape(bsz * num_codebooks, -1)
+
+ # fill the shifted ids with the prompt entries, offset by the codebook idx
+ for codebook in range(channel_codebooks):
+ if self.config.audio_channels == 1:
+ # mono channel - loop over the codebooks one-by-one
+ input_ids_shifted[:, codebook, codebook : seq_len + codebook] = input_ids[:, codebook]
+ else:
+ # left/right channels are interleaved in the generated codebooks, so handle one then the other
+ input_ids_shifted[:, 2 * codebook, codebook : seq_len + codebook] = input_ids[:, 2 * codebook]
+ input_ids_shifted[:, 2 * codebook + 1, codebook : seq_len + codebook] = input_ids[:, 2 * codebook + 1]
+
+ # construct a pattern mask that indicates the positions of padding tokens for each codebook
+ # first fill the upper triangular part (the EOS padding)
+ delay_pattern = torch.triu(
+ torch.ones((channel_codebooks, max_length), dtype=torch.bool), diagonal=max_length - channel_codebooks + 1
+ )
+ # then fill the lower triangular part (the BOS padding)
+ delay_pattern = delay_pattern + torch.tril(torch.ones((channel_codebooks, max_length), dtype=torch.bool))
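+ # Concrete example (values assumed): with channel_codebooks = 4 and max_length = 8, the `triu` call
+ # (diagonal = 8 - 4 + 1 = 5) marks the trailing EOS padding (columns 5-7 of row 0, 6-7 of row 1, ...),
+ # while `tril` marks the leading BOS padding (column 0 of row 0, columns 0-1 of row 1, ...). Their union
+ # reproduces the P positions illustrated in the docstring of this method.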
+
+ if self.config.audio_channels == 2:
+ # for left/right channel we need to duplicate every row of the pattern mask in an interleaved fashion
+ delay_pattern = delay_pattern.repeat_interleave(2, dim=0)
+
+ mask = ~delay_pattern.to(input_ids.device)
+ input_ids = mask * input_ids_shifted + ~mask * pad_token_id
+
+ # find the first position to start generating - this is the first place we have the -1 token
+ # and will always be in the first codebook (since it has no codebook offset)
+ first_codebook_ids = input_ids[:, 0, :]
+ start_ids = (first_codebook_ids == -1).nonzero()[:, 1]
+ if len(start_ids) > 0:
+ first_start_id = min(start_ids)
+ else:
+ # we have no tokens that need to be filled - return entire matrix of input ids
+ first_start_id = seq_len
+
+ # (bsz, num_codebooks, seq_len) -> (bsz * num_codebooks, seq_len)
+ pattern_mask = input_ids.reshape(bsz * num_codebooks, -1)
+ input_ids = input_ids[..., :first_start_id].reshape(bsz * num_codebooks, -1)
+ return input_ids, pattern_mask
+
+ @staticmethod
+ def apply_delay_pattern_mask(input_ids, decoder_pad_token_mask):
+ """Apply a delay pattern mask to the decoder input ids, only preserving predictions where
+ the mask is set to -1, and otherwise setting to the value detailed in the mask."""
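+ # Illustrative behaviour (values assumed): with a pad token id of 2048,
+ #     input_ids              = [[5, 6, 7]]
+ #     decoder_pad_token_mask = [[2048, -1, -1, 2048]]
+ # the mask is first truncated to the current sequence length ([[2048, -1, -1]]), and only positions
+ # holding -1 keep the predicted token, giving [[2048, 6, 7]].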
+ seq_len = input_ids.shape[-1]
+ decoder_pad_token_mask = decoder_pad_token_mask[..., :seq_len]
+ input_ids = torch.where(decoder_pad_token_mask == -1, input_ids, decoder_pad_token_mask)
+ return input_ids
+
+ @torch.no_grad()
+ def generate(
+ self,
+ inputs: Optional[torch.Tensor] = None,
+ generation_config: Optional[GenerationConfig] = None,
+ logits_processor: Optional[LogitsProcessorList] = None,
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
+ synced_gpus: Optional[bool] = None,
+ streamer: Optional["BaseStreamer"] = None,
+ **kwargs,
+ ):
+ """
+
+ Generates sequences of token ids for models with a language modeling head.
+
+ Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
+ model's default generation configuration. You can override any `generation_config` by passing the corresponding
+ parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.
+
+ For an overview of generation strategies and code examples, check out the [following
+ guide](./generation_strategies).
+
+ Parameters:
+ inputs (`torch.Tensor` of varying shape depending on the modality, *optional*):
+ The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the
+ method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs`
+ should be in the format `input_ids`. For encoder-decoder models *inputs* can represent any of
+ `input_ids`, `input_values`, `input_features`, or `pixel_values`.
+ generation_config (`~generation.GenerationConfig`, *optional*):
+ The generation configuration to be used as base parametrization for the generation call. `**kwargs`
+ passed to generate matching the attributes of `generation_config` will override them. If
+ `generation_config` is not provided, the default will be used, which has the following loading
+ priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
+ configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
+ default values, whose documentation should be checked to parameterize generation.
+ logits_processor (`LogitsProcessorList`, *optional*):
+ Custom logits processors that complement the default logits processors built from arguments and
+ generation config. If a logit processor is passed that is already created with the arguments or a
+ generation config an error is thrown. This feature is intended for advanced users.
+ stopping_criteria (`StoppingCriteriaList`, *optional*):
+ Custom stopping criteria that complement the default stopping criteria built from arguments and a
+ generation config. If a stopping criteria is passed that is already created with the arguments or a
+ generation config an error is thrown. This feature is intended for advanced users.
+ synced_gpus (`bool`, *optional*, defaults to `False`):
+ Whether to continue running the while loop until max_length (needed to avoid deadlocking with
+ `FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3).
+ streamer (`BaseStreamer`, *optional*):
+ Streamer object that will be used to stream the generated sequences. Generated tokens are passed
+ through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
+ kwargs (`Dict[str, Any]`, *optional*):
+ Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
+ forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
+ specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.
+
+ Return:
+ [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
+ or when `config.return_dict_in_generate=True`) or a `torch.LongTensor`.
+
+ If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible
+ [`~utils.ModelOutput`] types are:
+
+ - [`~generation.GenerateDecoderOnlyOutput`],
+ - [`~generation.GenerateBeamDecoderOnlyOutput`]
+
+ If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible
+ [`~utils.ModelOutput`] types are:
+
+ - [`~generation.GenerateEncoderDecoderOutput`],
+ - [`~generation.GenerateBeamEncoderDecoderOutput`]
+ """
+ # 1. Handle `generation_config` and kwargs that might update it, and validate the resulting objects
+ if generation_config is None:
+ generation_config = self.generation_config
+
+ generation_config = copy.deepcopy(generation_config)
+ model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs
+ generation_config.validate()
+ self._validate_model_kwargs(model_kwargs.copy())
+
+ # 2. Set generation parameters if not already defined
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
+
+ requires_attention_mask = "encoder_outputs" not in model_kwargs
+ kwargs_has_attention_mask = model_kwargs.get("attention_mask", None) is not None
+
+ # 3. Define model inputs
+ input_ids, model_input_name, model_kwargs = self._prepare_model_inputs(
+ inputs, generation_config.bos_token_id, model_kwargs
+ )
+ batch_size = input_ids.shape[0] // self.num_codebooks
+ self._prepare_special_tokens(generation_config, kwargs_has_attention_mask, device=input_ids.device)
+
+ # 4. Define other model kwargs
+ model_kwargs["use_cache"] = generation_config.use_cache
+ model_kwargs["guidance_scale"] = generation_config.guidance_scale
+
+ if model_kwargs.get("attention_mask", None) is None and requires_attention_mask:
+ model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation(
+ input_ids, generation_config, model_kwargs
+ )
+
+ # 5. Prepare `max_length` depending on other stopping criteria.
+ input_ids_length = input_ids.shape[-1]
+ has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
+ has_default_min_length = kwargs.get("min_length") is None and generation_config.min_length is not None
+ generation_config = self._prepare_generated_length(
+ generation_config=generation_config,
+ has_default_max_length=has_default_max_length,
+ has_default_min_length=has_default_min_length,
+ model_input_name=model_input_name,
+ inputs_tensor=input_ids,
+ input_ids_length=input_ids_length,
+ )
+
+ # 6. Prepare `input_ids` which will be used for auto-regressive generation
+ # Build the delay pattern mask for offsetting each codebook prediction by 1 (this behaviour is specific to MusicGen)
+ input_ids, delay_pattern_mask = self.build_delay_pattern_mask(
+ input_ids,
+ pad_token_id=generation_config._decoder_start_token_tensor,
+ max_length=generation_config.max_length,
+ )
+
+ if streamer is not None:
+ streamer.put(input_ids.cpu())
+
+ # stash the delay mask so that we don't have to recompute it in each forward pass
+ model_kwargs["delay_pattern_mask"] = delay_pattern_mask
+
+ # 7. determine generation mode
+ generation_mode = generation_config.get_generation_mode()
+
+ # 8. prepare batched CFG externally (to enable coexistence with the unbatched CFG)
+ if generation_config.guidance_scale is not None and generation_config.guidance_scale > 1:
+ logits_processor.append(ClassifierFreeGuidanceLogitsProcessor(generation_config.guidance_scale))
+ generation_config.guidance_scale = None
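+ # Sketch of the CFG mechanics (informal): `prepare_inputs_for_generation` duplicates the decoder inputs
+ # along the batch dimension (conditional + unconditional halves), and the processor appended above then
+ # recombines the two halves of the logits, roughly uncond + guidance_scale * (cond - uncond), before sampling.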
+
+ # 9. prepare distribution pre_processing samplers
+ logits_processor = self._get_logits_processor(
+ generation_config=generation_config,
+ input_ids_seq_length=input_ids_length,
+ encoder_input_ids=input_ids,
+ prefix_allowed_tokens_fn=None,
+ logits_processor=logits_processor,
+ device=input_ids.device,
+ )
+
+ # 10. prepare stopping criteria
+ stopping_criteria = self._get_stopping_criteria(
+ generation_config=generation_config, stopping_criteria=stopping_criteria
+ )
+
+ if generation_mode in (GenerationMode.SAMPLE, GenerationMode.GREEDY_SEARCH):
+ # expand input_ids with `num_return_sequences` additional sequences per batch
+ input_ids, model_kwargs = self._expand_inputs_for_generation(
+ input_ids=input_ids,
+ expand_size=generation_config.num_return_sequences,
+ **model_kwargs,
+ )
+
+ # 11. run sample
+ outputs = self._sample(
+ input_ids,
+ logits_processor=logits_processor,
+ stopping_criteria=stopping_criteria,
+ generation_config=generation_config,
+ synced_gpus=synced_gpus,
+ streamer=streamer,
+ **model_kwargs,
+ )
+
+ else:
+ raise ValueError(
+ "Got incompatible mode for generation, should be one of greedy or sampling. "
+ "Ensure that beam search is de-activated by setting `num_beams=1` and `num_beam_groups=1`."
+ )
+
+ if generation_config.return_dict_in_generate:
+ output_ids = outputs.sequences
+ else:
+ output_ids = outputs
+
+ # apply the pattern mask to the final ids
+ output_ids = self.apply_delay_pattern_mask(output_ids, model_kwargs["delay_pattern_mask"])
+
+ # revert the pattern delay mask by filtering the pad token id
+ output_ids = output_ids[output_ids != generation_config._pad_token_tensor].reshape(
+ batch_size, self.num_codebooks, -1
+ )
+
+ if generation_config.return_dict_in_generate:
+ outputs.sequences = output_ids
+ return outputs
+ else:
+ return output_ids
+
+
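+# End-to-end usage sketch for the composite model defined below (hedged: checkpoint name, prompt text and
+# token budget are assumptions for illustration, not values mandated by this file):
+#
+#     from transformers import AutoProcessor, MusicgenForConditionalGeneration
+#
+#     processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
+#     model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
+#     inputs = processor(text=["lo-fi beat with mellow piano"], padding=True, return_tensors="pt")
+#     audio_values = model.generate(**inputs, do_sample=True, max_new_tokens=256)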
+@add_start_docstrings(
+ "The composite MusicGen model with a text encoder, audio encoder and Musicgen decoder, "
+ "for music generation tasks with one or both of text and audio prompts.",
+ MUSICGEN_START_DOCSTRING,
+)
+class MusicgenForConditionalGeneration(PreTrainedModel, GenerationMixin):
+ config_class = MusicgenConfig
+ base_model_prefix = "encoder_decoder"
+ main_input_name = "input_ids"
+ supports_gradient_checkpointing = True
+ _supports_flash_attn_2 = True
+ _supports_sdpa = True
+
+ def __init__(
+ self,
+ config: Optional[MusicgenConfig] = None,
+ text_encoder: Optional[PreTrainedModel] = None,
+ audio_encoder: Optional[PreTrainedModel] = None,
+ decoder: Optional[MusicgenForCausalLM] = None,
+ ):
+ if config is None and (text_encoder is None or audio_encoder is None or decoder is None):
+ raise ValueError(
+ "Either a configuration has to be provided, or all three of text encoder, audio encoder and MusicGen decoder."
+ )
+ if config is None:
+ config = MusicgenConfig.from_sub_models_config(text_encoder.config, audio_encoder.config, decoder.config)
+ else:
+ if not isinstance(config, self.config_class):
+ raise ValueError(f"Config: {config} has to be of type {self.config_class}")
+
+ if config.decoder.cross_attention_hidden_size is not None:
+ if config.decoder.cross_attention_hidden_size != config.text_encoder.hidden_size:
+ raise ValueError(
+ "If `cross_attention_hidden_size` is specified in the MusicGen decoder's configuration, it has to be equal"
+ f" to the text encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
+ f" `config.decoder.cross_attention_hidden_size` and {config.text_encoder.hidden_size} for"
+ " `config.text_encoder.hidden_size`."
+ )
+
+ # initialize with config
+ super().__init__(config)
+
+ if text_encoder is None:
+ from ..auto.modeling_auto import AutoModelForTextEncoding
+
+ text_encoder = AutoModelForTextEncoding.from_config(config.text_encoder)
+
+ if audio_encoder is None:
+ from ..auto.modeling_auto import AutoModel
+
+ audio_encoder = AutoModel.from_config(config.audio_encoder)
+
+ if decoder is None:
+ decoder = MusicgenForCausalLM._from_config(config.decoder)
+
+ self.text_encoder = text_encoder
+ self.audio_encoder = audio_encoder
+ self.decoder = decoder
+
+ if self.text_encoder.config.to_dict() != self.config.text_encoder.to_dict():
+ logger.warning(
+ f"Config of the text_encoder: {self.text_encoder.__class__} is overwritten by shared text_encoder config:"
+ f" {self.config.text_encoder}"
+ )
+ if self.audio_encoder.config.to_dict() != self.config.audio_encoder.to_dict():
+ logger.warning(
+ f"Config of the audio_encoder: {self.audio_encoder.__class__} is overwritten by shared audio_encoder config:"
+ f" {self.config.audio_encoder}"
+ )
+ if self.decoder.config.to_dict() != self.config.decoder.to_dict():
+ logger.warning(
+ f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:"
+ f" {self.config.decoder}"
+ )
+
+ # make sure that the individual model's config refers to the shared config
+ # so that the updates to the config will be synced
+ self.config.text_encoder._attn_implementation = self.text_encoder.config._attn_implementation
+ self.config.audio_encoder._attn_implementation = self.audio_encoder.config._attn_implementation
+ self.config.decoder._attn_implementation = self.decoder.config._attn_implementation
+ self.text_encoder.config = self.config.text_encoder
+ self.audio_encoder.config = self.config.audio_encoder
+ self.decoder.config = self.config.decoder
+
+ # text encoder outputs might need to be projected to different dimension for decoder
+ if (
+ self.text_encoder.config.hidden_size != self.decoder.config.hidden_size
+ and self.decoder.config.cross_attention_hidden_size is None
+ ):
+ self.enc_to_dec_proj = nn.Linear(self.text_encoder.config.hidden_size, self.decoder.config.hidden_size)
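+        # e.g. a T5-base text encoder (hidden_size 768) paired with the musicgen-small decoder
+        # (hidden_size 1024) needs this projection; if the sizes already match, no projection is created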
+
+ if self.text_encoder.get_output_embeddings() is not None:
+ raise ValueError(
+ f"The encoder {self.text_encoder} should not have a LM Head. Please use a model without and LM Head"
+ )
+
+ decoder_signature = set(inspect.signature(self.decoder.forward).parameters.keys())
+ if "encoder_hidden_states" not in decoder_signature:
+ raise ValueError(
+ "The selected decoder is not prepared for the encoder hidden states to be passed. Please see the "
+ "following discussion on GitHub: https://github.com/huggingface/transformers/issues/23350"
+ )
+
+ # tie text encoder, decoder weights if config set accordingly
+ self.tie_weights()
+
+ def tie_weights(self):
+ # tie text encoder & decoder if needed
+ if self.config.tie_encoder_decoder:
+ # tie text encoder and decoder base model
+ decoder_base_model_prefix = self.decoder.base_model_prefix
+ tied_weights = self._tie_encoder_decoder_weights(
+ self.text_encoder,
+ self.decoder._modules[decoder_base_model_prefix],
+ self.decoder.base_model_prefix,
+ "text_encoder",
+ )
+            # Set a dynamic variable instead of `_tied_weights_keys`, because the latter is a class
+            # attribute, not an instance member; modifying it would modify the entire class, leading
+            # to issues on subsequent calls by different tests.
+ self._dynamic_tied_weights_keys = tied_weights
+
+ def get_audio_encoder(self):
+ return self.audio_encoder
+
+ def get_text_encoder(self):
+ return self.text_encoder
+
+ def get_encoder(self):
+ # get the text encoder to compute the encoder hidden-states for generation
+ return self.get_text_encoder()
+
+ def get_decoder(self):
+ return self.decoder
+
+ def get_input_embeddings(self):
+ return self.text_encoder.get_input_embeddings()
+
+ def get_output_embeddings(self):
+ return self.decoder.get_output_embeddings()
+
+ def set_output_embeddings(self, new_embeddings):
+ return self.decoder.set_output_embeddings(new_embeddings)
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
+ r"""
+ Example:
+
+ ```python
+ >>> from transformers import MusicgenForConditionalGeneration
+
+ >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
+ ```"""
+
+ # At the moment fast initialization is not supported for composite models
+ if kwargs.get("_fast_init", False):
+ logger.warning(
+ "Fast initialization is currently not supported for MusicgenForConditionalGeneration. "
+ "Falling back to slow initialization..."
+ )
+ kwargs["_fast_init"] = False
+
+ return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
+
+ @classmethod
+ def from_sub_models_pretrained(
+ cls,
+ text_encoder_pretrained_model_name_or_path: str = None,
+ audio_encoder_pretrained_model_name_or_path: str = None,
+ decoder_pretrained_model_name_or_path: str = None,
+ *model_args,
+ **kwargs,
+ ) -> PreTrainedModel:
+ r"""
+ Instantiate a text encoder, an audio encoder, and a MusicGen decoder from one, two or three base classes of the
+ library from pretrained model checkpoints.
+
+
+ The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
+ the model, you need to first set it back in training mode with `model.train()`.
+
+ Params:
+ text_encoder_pretrained_model_name_or_path (`str`, *optional*):
+ Information necessary to initiate the text encoder. Can be either:
+
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+ - A path to a *directory* containing model weights saved using
+ [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+
+ audio_encoder_pretrained_model_name_or_path (`str`, *optional*):
+ Information necessary to initiate the audio encoder. Can be either:
+
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+ - A path to a *directory* containing model weights saved using
+ [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+
+ decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
+ Information necessary to initiate the decoder. Can be either:
+
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+ - A path to a *directory* containing model weights saved using
+ [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+
+ model_args (remaining positional arguments, *optional*):
+ All remaining positional arguments will be passed to the underlying model's `__init__` method.
+
+ kwargs (remaining dictionary of keyword arguments, *optional*):
+                Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g.,
+ `output_attentions=True`).
+
+ - To update the text encoder configuration, use the prefix *text_encoder_* for each configuration
+ parameter.
+ - To update the audio encoder configuration, use the prefix *audio_encoder_* for each configuration
+ parameter.
+ - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
+ - To update the parent model configuration, do not use a prefix for each configuration parameter.
+
+ Behaves differently depending on whether a `config` is provided or automatically loaded.
+
+ Example:
+
+ ```python
+ >>> from transformers import MusicgenForConditionalGeneration
+
+ >>> # initialize a musicgen model from a t5 text encoder, encodec audio encoder, and musicgen decoder
+ >>> model = MusicgenForConditionalGeneration.from_sub_models_pretrained(
+ ... text_encoder_pretrained_model_name_or_path="google-t5/t5-base",
+ ... audio_encoder_pretrained_model_name_or_path="facebook/encodec_24khz",
+ ... decoder_pretrained_model_name_or_path="facebook/musicgen-small",
+ ... )
+ >>> # saving model after fine-tuning
+ >>> model.save_pretrained("./musicgen-ft")
+ >>> # load fine-tuned model
+ >>> model = MusicgenForConditionalGeneration.from_pretrained("./musicgen-ft")
+ ```"""
+
+ kwargs_text_encoder = {
+ argument[len("text_encoder_") :]: value
+ for argument, value in kwargs.items()
+ if argument.startswith("text_encoder_")
+ }
+
+ kwargs_audio_encoder = {
+ argument[len("audio_encoder_") :]: value
+ for argument, value in kwargs.items()
+ if argument.startswith("audio_encoder_")
+ }
+
+ kwargs_decoder = {
+ argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
+ }
+
+ # remove text encoder, audio encoder and decoder kwargs from kwargs
+ for key in kwargs_text_encoder.keys():
+ del kwargs["text_encoder_" + key]
+ for key in kwargs_audio_encoder.keys():
+ del kwargs["audio_encoder_" + key]
+ for key in kwargs_decoder.keys():
+ del kwargs["decoder_" + key]
+
+ # Load and initialize the encoder and decoder
+ # The distinction between encoder and decoder at the model level is made
+ # by the value of the flag `is_decoder` that we need to set correctly.
+ text_encoder = kwargs_text_encoder.pop("model", None)
+ if text_encoder is None:
+ if text_encoder_pretrained_model_name_or_path is None:
+ raise ValueError(
+ "If `text_encoder_model` is not defined as an argument, a `text_encoder_pretrained_model_name_or_path` has "
+ "to be defined."
+ )
+
+ if "config" not in kwargs_text_encoder:
+ encoder_config, kwargs_text_encoder = AutoConfig.from_pretrained(
+ text_encoder_pretrained_model_name_or_path, **kwargs_text_encoder, return_unused_kwargs=True
+ )
+
+ if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
+ logger.info(
+ f"Initializing {text_encoder_pretrained_model_name_or_path} as a text_encoder model "
+ "from a decoder model. Cross-attention and casual mask are disabled."
+ )
+ encoder_config.is_decoder = False
+ encoder_config.add_cross_attention = False
+
+ kwargs_text_encoder["config"] = encoder_config
+
+ text_encoder = AutoModel.from_pretrained(
+ text_encoder_pretrained_model_name_or_path, *model_args, **kwargs_text_encoder
+ )
+
+ audio_encoder = kwargs_audio_encoder.pop("model", None)
+ if audio_encoder is None:
+ if audio_encoder_pretrained_model_name_or_path is None:
+ raise ValueError(
+ "If `audio_encoder_model` is not defined as an argument, an `audio_encoder_pretrained_model_name_or_path` has "
+ "to be defined."
+ )
+
+ if "config" not in kwargs_audio_encoder:
+ encoder_config, kwargs_audio_encoder = AutoConfig.from_pretrained(
+ audio_encoder_pretrained_model_name_or_path, **kwargs_audio_encoder, return_unused_kwargs=True
+ )
+
+ if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
+ logger.info(
+ f"Initializing {audio_encoder_pretrained_model_name_or_path} as an audio_encoder model "
+ "from a decoder model. Cross-attention and casual mask are disabled."
+ )
+ encoder_config.is_decoder = False
+ encoder_config.add_cross_attention = False
+
+ kwargs_audio_encoder["config"] = encoder_config
+
+ audio_encoder = AutoModel.from_pretrained(
+ audio_encoder_pretrained_model_name_or_path, *model_args, **kwargs_audio_encoder
+ )
+
+ decoder = kwargs_decoder.pop("model", None)
+ if decoder is None:
+ if decoder_pretrained_model_name_or_path is None:
+ raise ValueError(
+ "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
+ "to be defined."
+ )
+
+ if "config" not in kwargs_decoder:
+ decoder_config, kwargs_decoder = AutoConfig.from_pretrained(
+ decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True
+ )
+
+ if isinstance(decoder_config, MusicgenConfig):
+ decoder_config = decoder_config.decoder
+
+ if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
+ logger.info(
+ f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
+ f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
+ f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
+ )
+ decoder_config.is_decoder = True
+ decoder_config.add_cross_attention = True
+
+ kwargs_decoder["config"] = decoder_config
+
+ if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
+ logger.warning(
+ f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
+ f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
+ "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
+ "passed to `.from_sub_models_pretrained(...)` are set to `True` or do not pass a "
+ "`decoder_config` to `.from_sub_models_pretrained(...)`"
+ )
+
+ decoder = MusicgenForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
+
+ # instantiate config with corresponding kwargs
+ config = MusicgenConfig.from_sub_models_config(
+ text_encoder.config, audio_encoder.config, decoder.config, **kwargs
+ )
+ return cls(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder, config=config)
+
+ @add_start_docstrings_to_model_forward(MUSICGEN_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.BoolTensor] = None,
+ input_values: Optional[torch.FloatTensor] = None,
+ padding_mask: Optional[torch.BoolTensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
+ past_key_values: Tuple[Tuple[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Tuple, Seq2SeqLMOutput]:
+ r"""
+ Returns:
+
+ Examples:
+ ```python
+ >>> from transformers import AutoProcessor, MusicgenForConditionalGeneration
+ >>> import torch
+
+ >>> processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
+ >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
+
+ >>> inputs = processor(
+ ... text=["80s pop track with bassy drums and synth", "90s rock song with loud guitars and heavy drums"],
+ ... padding=True,
+ ... return_tensors="pt",
+ ... )
+
+ >>> pad_token_id = model.generation_config.pad_token_id
+ >>> decoder_input_ids = (
+ ... torch.ones((inputs.input_ids.shape[0] * model.decoder.num_codebooks, 1), dtype=torch.long)
+ ... * pad_token_id
+ ... )
+
+ >>> logits = model(**inputs, decoder_input_ids=decoder_input_ids).logits
+ >>> logits.shape # (bsz * num_codebooks, tgt_len, vocab_size)
+ torch.Size([8, 1, 2048])
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ kwargs_text_encoder = {
+ argument[len("text_encoder_")]: value
+ for argument, value in kwargs.items()
+ if argument.startswith("text_encoder_")
+ }
+
+ kwargs_audio_encoder = {
+ argument[len("audio_encoder_")]: value
+ for argument, value in kwargs.items()
+ if argument.startswith("audio_encoder_")
+ }
+
+ kwargs_decoder = {
+ argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
+ }
+
+ if encoder_outputs is None:
+ encoder_outputs = self.text_encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ **kwargs_text_encoder,
+ )
+ elif isinstance(encoder_outputs, tuple):
+ encoder_outputs = BaseModelOutput(*encoder_outputs)
+
+ encoder_hidden_states = encoder_outputs[0]
+
+ # optionally project encoder_hidden_states
+ if (
+ self.text_encoder.config.hidden_size != self.decoder.config.hidden_size
+ and self.decoder.config.cross_attention_hidden_size is None
+ ):
+ encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
+
+ if attention_mask is not None:
+ encoder_hidden_states = encoder_hidden_states * attention_mask[..., None]
+
+ if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.decoder.pad_token_id, self.config.decoder.decoder_start_token_id
+ )
+
+ elif decoder_input_ids is None and decoder_inputs_embeds is None:
+ audio_encoder_outputs = self.audio_encoder(
+ input_values=input_values,
+ padding_mask=padding_mask,
+ **kwargs_audio_encoder,
+ )
+ audio_codes = audio_encoder_outputs.audio_codes
+ frames, bsz, codebooks, seq_len = audio_codes.shape
+ if frames != 1:
+ raise ValueError(
+ f"Expected 1 frame in the audio code outputs, got {frames} frames. Ensure chunking is "
+ "disabled by setting `chunk_length=None` in the audio encoder."
+ )
+
+ if self.config.decoder.audio_channels == 2 and audio_codes.shape[2] == self.decoder.num_codebooks // 2:
+ # mono input through encodec that we convert to stereo
+ audio_codes = audio_codes.repeat_interleave(2, dim=2)
+
+ decoder_input_ids = audio_codes[0, ...].reshape(bsz * self.decoder.num_codebooks, seq_len)
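+            # e.g. with bsz=2 and num_codebooks=4, audio_codes[0] has shape (2, 4, seq_len) and flattens
+            # to decoder_input_ids of shape (8, seq_len), one row per (batch item, codebook) pair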
+
+ # Decode
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=attention_mask,
+ inputs_embeds=decoder_inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ use_cache=use_cache,
+ past_key_values=past_key_values,
+ return_dict=return_dict,
+ labels=labels,
+ **kwargs_decoder,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return Seq2SeqLMOutput(
+ loss=decoder_outputs.loss,
+ logits=decoder_outputs.logits,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ head_mask=None,
+ decoder_attention_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ decoder_delay_pattern_mask=None,
+ guidance_scale=None,
+ **kwargs,
+ ):
+ # Overwritten -- MusicGen has custom processing
+ if decoder_delay_pattern_mask is None:
+ decoder_input_ids, decoder_delay_pattern_mask = self.decoder.build_delay_pattern_mask(
+ decoder_input_ids,
+ self.generation_config.pad_token_id,
+ max_length=self.generation_config.max_length,
+ )
+
+ # apply the delay pattern mask
+ decoder_input_ids = self.decoder.apply_delay_pattern_mask(decoder_input_ids, decoder_delay_pattern_mask)
+
+ if guidance_scale is not None and guidance_scale > 1:
+ # for classifier free guidance we need to replicate the decoder args across the batch dim (we'll split these
+ # before sampling)
+ decoder_input_ids = decoder_input_ids.repeat((2, 1))
+ if decoder_attention_mask is not None:
+ decoder_attention_mask = decoder_attention_mask.repeat((2, 1))
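+            # the first half of the doubled batch is scored against the text prompt and the second half
+            # against the 'null' prompt; `ClassifierFreeGuidanceLogitsProcessor` later splits the logits
+            # back into these two halves and recombines them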
+
+ if past_key_values is not None:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if decoder_input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = decoder_input_ids.shape[1] - 1
+
+ decoder_input_ids = decoder_input_ids[:, remove_prefix_length:]
+
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "decoder_attention_mask": decoder_attention_mask,
+ "head_mask": head_mask,
+ "decoder_head_mask": decoder_head_mask,
+ "cross_attn_head_mask": cross_attn_head_mask,
+ "use_cache": use_cache,
+ }
+
+ def _prepare_decoder_input_ids_for_generation(
+ self,
+ batch_size: int,
+ model_input_name: str,
+ model_kwargs: Dict[str, torch.Tensor],
+ decoder_start_token_id: int = None,
+ bos_token_id: int = None,
+ device: torch.device = None,
+ ) -> Tuple[torch.LongTensor, Dict[str, torch.Tensor]]:
+ """Prepares `decoder_input_ids` for generation with encoder-decoder models"""
+
+ # 1. Check whether the user has defined `decoder_input_ids` manually. To facilitate in terms of input naming,
+ # we also allow the user to pass it under `input_ids`, if the encoder does not use it as the main input.
+ if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
+ decoder_input_ids = model_kwargs.pop("decoder_input_ids")
+ elif "input_ids" in model_kwargs and model_input_name != "input_ids":
+ decoder_input_ids = model_kwargs.pop("input_ids")
+ else:
+ decoder_input_ids = None
+
+ # 2. Encoder-decoder models expect the `decoder_input_ids` to start with a special token. Let's ensure that.
+ decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id)
+ if device is None:
+ device = self.device
+ decoder_input_ids_start = (
+ torch.ones((batch_size * self.decoder.num_codebooks, 1), dtype=torch.long, device=device)
+ * decoder_start_token_id
+ )
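+        # one start row per (batch item, codebook) pair, e.g. batch_size=2 with a 4-codebook decoder
+        # gives a (8, 1) tensor filled with the decoder start token id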
+
+ # no user input -> use decoder_start_token_id as decoder_input_ids
+ if decoder_input_ids is None:
+ decoder_input_ids = decoder_input_ids_start
+
+ # user input but doesn't start with decoder_start_token_id -> prepend decoder_start_token_id (and adjust
+ # decoder_attention_mask if provided)
+ elif (decoder_input_ids[..., 0] != decoder_start_token_id).all().item():
+ decoder_input_ids = torch.cat([decoder_input_ids_start, decoder_input_ids], dim=-1)
+ if "decoder_attention_mask" in model_kwargs:
+ decoder_attention_mask = model_kwargs["decoder_attention_mask"]
+ decoder_attention_mask = torch.cat(
+ (torch.ones_like(decoder_attention_mask)[:, :1], decoder_attention_mask),
+ dim=-1,
+ )
+ model_kwargs["decoder_attention_mask"] = decoder_attention_mask
+
+ return decoder_input_ids, model_kwargs
+
+ def _prepare_text_encoder_kwargs_for_generation(
+ self,
+ inputs_tensor: torch.Tensor,
+ model_kwargs,
+ model_input_name: Optional[str],
+ generation_config: GenerationConfig,
+ ) -> Dict[str, Any]:
+ # 1. get text encoder
+ encoder = self.get_text_encoder()
+        # Compatibility with Accelerate big model inference: we need the encoder outputs to be on the same device
+        # as the inputs.
+ if hasattr(encoder, "_hf_hook"):
+ encoder._hf_hook.io_same_device = True
+
+ # 2. Prepare encoder args and encoder kwargs from model kwargs.
+ irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"]
+ encoder_kwargs = {
+ argument: value
+ for argument, value in model_kwargs.items()
+ if not any(argument.startswith(p) for p in irrelevant_prefix)
+ }
+ encoder_signature = set(inspect.signature(encoder.forward).parameters)
+ encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature
+ if not encoder_accepts_wildcard:
+ encoder_kwargs = {
+ argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature
+ }
+ encoder_kwargs["output_attentions"] = generation_config.output_attentions
+ encoder_kwargs["output_hidden_states"] = generation_config.output_hidden_states
+ guidance_scale = generation_config.guidance_scale
+
+ # 3. make sure that encoder returns `ModelOutput`
+ model_input_name = model_input_name if model_input_name is not None else self.text_encoder.main_input_name
+ encoder_kwargs["return_dict"] = True
+ encoder_kwargs[model_input_name] = inputs_tensor
+ last_hidden_state = encoder(**encoder_kwargs).last_hidden_state
+
+ # for classifier free guidance we need to add a 'null' input to our encoder hidden states
+ if guidance_scale is not None and guidance_scale > 1:
+ last_hidden_state = torch.concatenate([last_hidden_state, torch.zeros_like(last_hidden_state)], dim=0)
+ if "attention_mask" in model_kwargs:
+ model_kwargs["attention_mask"] = torch.concatenate(
+ [model_kwargs["attention_mask"], torch.zeros_like(model_kwargs["attention_mask"])], dim=0
+ )
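+            # after this, the first half of the encoder states is the conditional text representation and the
+            # second half is all zeros (with a zeroed attention mask), i.e. the 'null' input that classifier-free
+            # guidance contrasts against during sampling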
+
+ model_kwargs["encoder_outputs"] = BaseModelOutput(last_hidden_state=last_hidden_state)
+
+ return model_kwargs
+
+ def _prepare_audio_encoder_kwargs_for_generation(
+ self, input_values, model_kwargs, model_input_name: Optional[str] = None
+ ):
+ # 1. get audio encoder
+ encoder = self.get_audio_encoder()
+        # Compatibility with Accelerate big model inference: we need the encoder outputs to be on the same device
+        # as the inputs.
+ if hasattr(encoder, "_hf_hook"):
+ encoder._hf_hook.io_same_device = True
+
+ # 2. Prepare encoder args and encoder kwargs from model kwargs.
+ irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"]
+ encoder_kwargs = {
+ argument: value
+ for argument, value in model_kwargs.items()
+ if not any(argument.startswith(p) for p in irrelevant_prefix)
+ }
+ encoder_signature = set(inspect.signature(encoder.forward).parameters)
+ encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature
+ if not encoder_accepts_wildcard:
+ encoder_kwargs = {
+ argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature
+ }
+
+ # 3. make sure that encoder returns `ModelOutput`
+ model_input_name = model_input_name if model_input_name is not None else self.audio_encoder.main_input_name
+ encoder_kwargs["return_dict"] = True
+
+ if self.decoder.config.audio_channels == 1:
+ encoder_kwargs[model_input_name] = input_values
+ audio_encoder_outputs = encoder.encode(**encoder_kwargs)
+ audio_codes = audio_encoder_outputs.audio_codes
+ audio_scales = audio_encoder_outputs.audio_scales
+
+ frames, bsz, codebooks, seq_len = audio_codes.shape
+
+ else:
+ if input_values.shape[1] != 2:
+ raise ValueError(
+ f"Expected stereo audio (2-channels) but example has {input_values.shape[1]} channel."
+ )
+
+ encoder_kwargs[model_input_name] = input_values[:, :1, :]
+ audio_encoder_outputs_left = encoder.encode(**encoder_kwargs)
+ audio_codes_left = audio_encoder_outputs_left.audio_codes
+ audio_scales_left = audio_encoder_outputs_left.audio_scales
+
+ encoder_kwargs[model_input_name] = input_values[:, 1:, :]
+ audio_encoder_outputs_right = encoder.encode(**encoder_kwargs)
+ audio_codes_right = audio_encoder_outputs_right.audio_codes
+ audio_scales_right = audio_encoder_outputs_right.audio_scales
+
+ frames, bsz, codebooks, seq_len = audio_codes_left.shape
+ # copy alternating left/right channel codes into stereo codebook
+ audio_codes = audio_codes_left.new_ones((frames, bsz, 2 * codebooks, seq_len))
+
+ audio_codes[:, :, ::2, :] = audio_codes_left
+ audio_codes[:, :, 1::2, :] = audio_codes_right
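+            # left-channel codes occupy the even codebook indices and right-channel codes the odd ones;
+            # `generate` undoes this later by decoding output_ids[:, :, ::2, :] and output_ids[:, :, 1::2, :] separately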
+
+ if audio_scales_left != [None] or audio_scales_right != [None]:
+ audio_scales = torch.stack([audio_scales_left, audio_scales_right], dim=1)
+ else:
+ audio_scales = [None] * bsz
+
+ if frames != 1:
+ raise ValueError(
+ f"Expected 1 frame in the audio code outputs, got {frames} frames. Ensure chunking is "
+ "disabled by setting `chunk_length=None` in the audio encoder."
+ )
+
+ decoder_input_ids = audio_codes[0, ...].reshape(bsz * self.decoder.num_codebooks, seq_len)
+
+ model_kwargs["decoder_input_ids"] = decoder_input_ids
+ model_kwargs["audio_scales"] = audio_scales
+ return model_kwargs
+
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
+ return shift_tokens_right(labels, self.config.decoder.pad_token_id, self.config.decoder.bos_token_id)
+
+ def resize_token_embeddings(self, *args, **kwargs):
+ raise NotImplementedError(
+ "Resizing the embedding layers via the EncoderDecoderModel directly is not supported. Please use the"
+ " respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or"
+ " model.decoder.resize_token_embeddings(...))"
+ )
+
+ def freeze_audio_encoder(self):
+ """
+ Freeze the audio encoder weights.
+ """
+ for param in self.audio_encoder.parameters():
+ param.requires_grad = False
+ self.audio_encoder._requires_grad = False
+
+ def freeze_text_encoder(self):
+ """
+ Freeze the text encoder weights.
+ """
+ for param in self.text_encoder.parameters():
+ param.requires_grad = False
+ self.text_encoder._requires_grad = False
+
+ def _maybe_initialize_input_ids_for_generation(
+ self,
+ inputs: Optional[torch.Tensor] = None,
+ bos_token_id: Optional[int] = None,
+ model_kwargs: Optional[Dict[str, torch.Tensor]] = None,
+ ) -> torch.LongTensor:
+ """Initializes input ids for generation, if necessary."""
+ if inputs is not None:
+ return inputs
+
+ encoder_outputs = model_kwargs.get("encoder_outputs")
+ if encoder_outputs is not None:
+ # make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding
+ shape = encoder_outputs[0].size()[:-1]
+ return torch.ones(shape, dtype=torch.long, device=self.device) * -100
+
+ if bos_token_id is None:
+ raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.")
+
+ # If there is some tensor in `model_kwargs`, we can infer the batch size from it. This is helpful with
+ # soft-prompting or in multimodal implementations built on top of decoder-only language models.
+ batch_size = 1
+ for value in model_kwargs.values():
+ if isinstance(value, torch.Tensor):
+ batch_size = value.shape[0]
+ break
+ return torch.ones((batch_size, 1), dtype=torch.long, device=self.device) * bos_token_id
+
+ def _get_decoder_start_token_id(
+ self, decoder_start_token_id: Union[int, List[int]] = None, bos_token_id: int = None
+ ) -> int:
+ decoder_start_token_id = (
+ decoder_start_token_id
+ if decoder_start_token_id is not None
+ else self.generation_config.decoder_start_token_id
+ )
+ bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id
+
+ if decoder_start_token_id is not None:
+ return decoder_start_token_id
+ elif bos_token_id is not None:
+ return bos_token_id
+ raise ValueError(
+ "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation."
+ )
+
+ @torch.no_grad()
+ def generate(
+ self,
+ inputs: Optional[torch.Tensor] = None,
+ generation_config: Optional[GenerationConfig] = None,
+ logits_processor: Optional[LogitsProcessorList] = None,
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
+ synced_gpus: Optional[bool] = None,
+ streamer: Optional["BaseStreamer"] = None,
+ **kwargs,
+ ):
+ """
+
+ Generates sequences of token ids for models with a language modeling head.
+
+
+
+ Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
+ model's default generation configuration. You can override any `generation_config` by passing the corresponding
+ parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.
+
+ For an overview of generation strategies and code examples, check out the [following
+ guide](./generation_strategies).
+
+
+
+ Parameters:
+ inputs (`torch.Tensor` of varying shape depending on the modality, *optional*):
+ The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the
+ method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs`
+ should be in the format `input_ids`. For encoder-decoder models *inputs* can represent any of
+ `input_ids`, `input_values`, `input_features`, or `pixel_values`.
+ generation_config (`~generation.GenerationConfig`, *optional*):
+ The generation configuration to be used as base parametrization for the generation call. `**kwargs`
+ passed to generate matching the attributes of `generation_config` will override them. If
+                `generation_config` is not provided, the default will be used, which has the following loading
+ priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
+ configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
+ default values, whose documentation should be checked to parameterize generation.
+ logits_processor (`LogitsProcessorList`, *optional*):
+ Custom logits processors that complement the default logits processors built from arguments and
+ generation config. If a logit processor is passed that is already created with the arguments or a
+                generation config, an error is thrown. This feature is intended for advanced users.
+ stopping_criteria (`StoppingCriteriaList`, *optional*):
+ Custom stopping criteria that complement the default stopping criteria built from arguments and a
+                generation config. If a stopping criterion is passed that is already created with the arguments or a
+                generation config, an error is thrown. This feature is intended for advanced users.
+ synced_gpus (`bool`, *optional*, defaults to `False`):
+ Whether to continue running the while loop until max_length (needed to avoid deadlocking with
+ `FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3).
+ streamer (`BaseStreamer`, *optional*):
+ Streamer object that will be used to stream the generated sequences. Generated tokens are passed
+ through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
+ kwargs (`Dict[str, Any]`, *optional*):
+                Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
+ forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
+ specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.
+
+ Return:
+ [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
+ or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`.
+
+ If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible
+ [`~utils.ModelOutput`] types are:
+
+ - [`~generation.GenerateDecoderOnlyOutput`],
+ - [`~generation.GenerateBeamDecoderOnlyOutput`]
+
+ If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible
+ [`~utils.ModelOutput`] types are:
+
+ - [`~generation.GenerateEncoderDecoderOutput`],
+ - [`~generation.GenerateBeamEncoderDecoderOutput`]
+ """
+ # 1. Handle `generation_config` and kwargs that might update it, and validate the resulting objects
+ if generation_config is None:
+ generation_config = self.generation_config
+
+ generation_config = copy.deepcopy(generation_config)
+ model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs
+ generation_config.validate()
+ self._validate_model_kwargs(model_kwargs.copy())
+
+ if model_kwargs.get("encoder_outputs") is not None and type(model_kwargs["encoder_outputs"]) is tuple:
+ # wrap the unconditional outputs as a BaseModelOutput for compatibility with the rest of generate
+ model_kwargs["encoder_outputs"] = BaseModelOutput(last_hidden_state=model_kwargs["encoder_outputs"][0])
+
+ # 2. Set generation parameters if not already defined
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
+
+ requires_attention_mask = "encoder_outputs" not in model_kwargs
+ kwargs_has_attention_mask = model_kwargs.get("attention_mask", None) is not None
+
+ # 3. Define model inputs
+ inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
+ inputs, generation_config.bos_token_id, model_kwargs
+ )
+ batch_size = inputs_tensor.shape[0]
+ self._prepare_special_tokens(generation_config, kwargs_has_attention_mask, device=inputs_tensor.device)
+
+ # 4. Define other model kwargs
+ model_kwargs["use_cache"] = generation_config.use_cache
+ model_kwargs["guidance_scale"] = generation_config.guidance_scale
+
+ if model_kwargs.get("attention_mask", None) is None and requires_attention_mask:
+ model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation(
+ inputs_tensor, generation_config, model_kwargs
+ )
+
+ if "encoder_outputs" not in model_kwargs:
+ # encoder_outputs are created and added to `model_kwargs`
+ model_kwargs = self._prepare_text_encoder_kwargs_for_generation(
+ inputs_tensor, model_kwargs, model_input_name, generation_config
+ )
+
+ if "decoder_input_ids" not in model_kwargs and "input_values" in model_kwargs:
+ model_kwargs = self._prepare_audio_encoder_kwargs_for_generation(
+ model_kwargs["input_values"],
+ model_kwargs,
+ )
+
+ # 5. Prepare `input_ids` which will be used for auto-regressive generation
+ input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(
+ batch_size=batch_size,
+ model_input_name=model_input_name,
+ model_kwargs=model_kwargs,
+ decoder_start_token_id=generation_config._decoder_start_token_tensor,
+ bos_token_id=generation_config._bos_token_tensor,
+ device=inputs_tensor.device,
+ )
+
+ # 6. Prepare `max_length` depending on other stopping criteria.
+ input_ids_length = input_ids.shape[-1]
+ has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
+ has_default_min_length = kwargs.get("min_length") is None and generation_config.min_length is not None
+ generation_config = self._prepare_generated_length(
+ generation_config=generation_config,
+ has_default_max_length=has_default_max_length,
+ has_default_min_length=has_default_min_length,
+ model_input_name=model_input_name,
+ inputs_tensor=inputs_tensor,
+ input_ids_length=input_ids_length,
+ )
+
+ # build the delay pattern mask for offsetting each codebook prediction by 1 (this behaviour is specific to MusicGen)
+ input_ids, decoder_delay_pattern_mask = self.decoder.build_delay_pattern_mask(
+ input_ids,
+ pad_token_id=generation_config._decoder_start_token_tensor,
+ max_length=generation_config.max_length,
+ )
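+        # roughly: the delay pattern shifts codebook i right by i positions and fills the gap with the
+        # start/pad token, so by the time codebook i is sampled for a given audio frame, the lower
+        # codebooks for that frame have already been generated at earlier decoding steps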
+ # stash the delay mask so that we don't have to recompute in each forward pass
+ model_kwargs["decoder_delay_pattern_mask"] = decoder_delay_pattern_mask
+
+ # input_ids are ready to be placed on the streamer (if used)
+ if streamer is not None:
+ streamer.put(input_ids.cpu())
+
+ # 7. determine generation mode
+ generation_mode = generation_config.get_generation_mode()
+
+        # 8. prepare batched CFG externally (to enable coexistence with the unbatched CFG)
+ if generation_config.guidance_scale is not None and generation_config.guidance_scale > 1:
+ logits_processor.append(ClassifierFreeGuidanceLogitsProcessor(generation_config.guidance_scale))
+ generation_config.guidance_scale = None
+
+ # 9. prepare distribution pre_processing samplers
+ logits_processor = self._get_logits_processor(
+ generation_config=generation_config,
+ input_ids_seq_length=input_ids_length,
+ encoder_input_ids=inputs_tensor,
+ prefix_allowed_tokens_fn=None,
+ logits_processor=logits_processor,
+ device=input_ids.device,
+ )
+
+ # 10. prepare stopping criteria
+ stopping_criteria = self._get_stopping_criteria(
+ generation_config=generation_config, stopping_criteria=stopping_criteria
+ )
+
+ if generation_mode in (GenerationMode.SAMPLE, GenerationMode.GREEDY_SEARCH):
+ # expand input_ids with `num_return_sequences` additional sequences per batch
+ input_ids, model_kwargs = self._expand_inputs_for_generation(
+ input_ids=input_ids,
+ expand_size=generation_config.num_return_sequences,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ **model_kwargs,
+ )
+
+ # 11. run sample
+ outputs = self._sample(
+ input_ids,
+ logits_processor=logits_processor,
+ stopping_criteria=stopping_criteria,
+ generation_config=generation_config,
+ synced_gpus=synced_gpus,
+ streamer=streamer,
+ **model_kwargs,
+ )
+
+ else:
+ raise ValueError(
+ "Got incompatible mode for generation, should be one of greedy or sampling. "
+ "Ensure that beam search is de-activated by setting `num_beams=1` and `num_beam_groups=1`."
+ )
+
+ if generation_config.return_dict_in_generate:
+ output_ids = outputs.sequences
+ else:
+ output_ids = outputs
+
+ # apply the pattern mask to the final ids
+ output_ids = self.decoder.apply_delay_pattern_mask(output_ids, model_kwargs["decoder_delay_pattern_mask"])
+
+ # revert the pattern delay mask by filtering the pad token id
+ output_ids = output_ids[output_ids != generation_config._pad_token_tensor].reshape(
+ batch_size, self.decoder.num_codebooks, -1
+ )
+
+ # append the frame dimension back to the audio codes
+ output_ids = output_ids[None, ...]
+
+ audio_scales = model_kwargs.get("audio_scales")
+ if audio_scales is None:
+ audio_scales = [None] * batch_size
+
+ if self.decoder.config.audio_channels == 1:
+ output_values = self.audio_encoder.decode(
+ output_ids,
+ audio_scales=audio_scales,
+ ).audio_values
+ else:
+ codec_outputs_left = self.audio_encoder.decode(output_ids[:, :, ::2, :], audio_scales=audio_scales)
+ output_values_left = codec_outputs_left.audio_values
+
+ codec_outputs_right = self.audio_encoder.decode(output_ids[:, :, 1::2, :], audio_scales=audio_scales)
+ output_values_right = codec_outputs_right.audio_values
+
+ output_values = torch.cat([output_values_left, output_values_right], dim=1)
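+            # concatenating along dim=1 stacks the two decoded mono waveforms into a
+            # (batch_size, 2, num_samples) stereo output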
+
+ if generation_config.return_dict_in_generate:
+ outputs.sequences = output_values
+ return outputs
+ else:
+ return output_values
+
+ def get_unconditional_inputs(self, num_samples=1):
+ """
+ Helper function to get null inputs for unconditional generation, enabling the model to be used without the
+ feature extractor or tokenizer.
+
+ Args:
+ num_samples (int, *optional*):
+ Number of audio samples to unconditionally generate.
+ max_new_tokens (int, *optional*):
+ Number of tokens to generate for each sample. More tokens means longer audio samples, at the expense of
+ longer inference (since more audio tokens need to be generated per sample).
+
+ Example:
+ ```python
+ >>> from transformers import MusicgenForConditionalGeneration
+
+ >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
+
+ >>> # get the unconditional (or 'null') inputs for the model
+ >>> unconditional_inputs = model.get_unconditional_inputs(num_samples=1)
+ >>> audio_samples = model.generate(**unconditional_inputs, max_new_tokens=256)
+ ```"""
+ last_hidden_state = torch.zeros(
+ (num_samples, 1, self.config.text_encoder.hidden_size), device=self.device, dtype=self.dtype
+ )
+
+ attention_mask = torch.zeros((num_samples, 1), device=self.device, dtype=torch.long)
+
+ return MusicgenUnconditionalInput(
+ encoder_outputs=(last_hidden_state,),
+ attention_mask=attention_mask,
+ guidance_scale=1.0,
+ )
+
+
+__all__ = ["MusicgenForConditionalGeneration", "MusicgenForCausalLM", "MusicgenModel", "MusicgenPreTrainedModel"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/musicgen/processing_musicgen.py b/janus/lib/python3.10/site-packages/transformers/models/musicgen/processing_musicgen.py
new file mode 100644
index 0000000000000000000000000000000000000000..deebf9045b4ffb7f9e438a1892fe651c3e2f54d6
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/musicgen/processing_musicgen.py
@@ -0,0 +1,144 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Text/audio processor class for MusicGen
+"""
+
+from typing import List, Optional
+
+import numpy as np
+
+from ...processing_utils import ProcessorMixin
+from ...utils import to_numpy
+
+
+class MusicgenProcessor(ProcessorMixin):
+ r"""
+ Constructs a MusicGen processor which wraps an EnCodec feature extractor and a T5 tokenizer into a single processor
+ class.
+
+    [`MusicgenProcessor`] offers all the functionalities of [`EncodecFeatureExtractor`] and [`T5Tokenizer`]. See
+ [`~MusicgenProcessor.__call__`] and [`~MusicgenProcessor.decode`] for more information.
+
+ Args:
+ feature_extractor (`EncodecFeatureExtractor`):
+ An instance of [`EncodecFeatureExtractor`]. The feature extractor is a required input.
+ tokenizer (`T5Tokenizer`):
+ An instance of [`T5Tokenizer`]. The tokenizer is a required input.
+ """
+
+ feature_extractor_class = "EncodecFeatureExtractor"
+ tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
+
+ def __init__(self, feature_extractor, tokenizer):
+ super().__init__(feature_extractor, tokenizer)
+ self.current_processor = self.feature_extractor
+ self._in_target_context_manager = False
+
+ def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
+ return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
+
+ def __call__(self, *args, **kwargs):
+ """
+ Forwards the `audio` argument to EncodecFeatureExtractor's [`~EncodecFeatureExtractor.__call__`] and the `text`
+        argument to [`~T5Tokenizer.__call__`]. Please refer to the docstring of the above two methods for more
+ information.
+ """
+ # For backward compatibility
+ if self._in_target_context_manager:
+ return self.current_processor(*args, **kwargs)
+
+ audio = kwargs.pop("audio", None)
+ sampling_rate = kwargs.pop("sampling_rate", None)
+ text = kwargs.pop("text", None)
+ if len(args) > 0:
+ audio = args[0]
+ args = args[1:]
+
+ if audio is None and text is None:
+ raise ValueError("You need to specify either an `audio` or `text` input to process.")
+
+ if text is not None:
+ inputs = self.tokenizer(text, **kwargs)
+
+ if audio is not None:
+ audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
+
+ if audio is None:
+ return inputs
+
+ elif text is None:
+ return audio_inputs
+
+ else:
+ inputs["input_values"] = audio_inputs["input_values"]
+ if "padding_mask" in audio_inputs:
+ inputs["padding_mask"] = audio_inputs["padding_mask"]
+ return inputs
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method is used to decode either batches of audio outputs from the MusicGen model, or batches of token ids
+ from the tokenizer. In the case of decoding token ids, this method forwards all its arguments to T5Tokenizer's
+ [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information.
+ """
+ audio_values = kwargs.pop("audio", None)
+ padding_mask = kwargs.pop("padding_mask", None)
+
+ if len(args) > 0:
+ audio_values = args[0]
+ args = args[1:]
+
+ if audio_values is not None:
+ return self._decode_audio(audio_values, padding_mask=padding_mask)
+ else:
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to T5Tokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the
+ docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+    def _decode_audio(self, audio_values, padding_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
+ """
+ This method strips any padding from the audio values to return a list of numpy audio arrays.
+ """
+ audio_values = to_numpy(audio_values)
+ bsz, channels, seq_len = audio_values.shape
+
+ if padding_mask is None:
+ return list(audio_values)
+
+ padding_mask = to_numpy(padding_mask)
+
+ # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
+ # token (so that the generated audio values are **not** treated as padded tokens)
+ difference = seq_len - padding_mask.shape[-1]
+ padding_value = 1 - self.feature_extractor.padding_value
+ padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)
+
+ audio_values = audio_values.tolist()
+ for i in range(bsz):
+ sliced_audio = np.asarray(audio_values[i])[
+ padding_mask[i][None, :] != self.feature_extractor.padding_value
+ ]
+ audio_values[i] = sliced_audio.reshape(channels, -1)
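+            # each sliced array now has shape (channels, unpadded_length) for this example; lengths can
+            # differ across the batch, which is why a list of arrays is returned rather than one batched array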
+
+ return audio_values
+
+
+__all__ = ["MusicgenProcessor"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/paligemma/__init__.py b/janus/lib/python3.10/site-packages/transformers/models/paligemma/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9048afe6adbdc0ad36007e02f60e899cae677c55
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/paligemma/__init__.py
@@ -0,0 +1,28 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+ from .configuration_paligemma import *
+ from .modeling_paligemma import *
+ from .processing_paligemma import *
+else:
+ import sys
+
+ _file = globals()["__file__"]
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/janus/lib/python3.10/site-packages/transformers/models/paligemma/__pycache__/configuration_paligemma.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/paligemma/__pycache__/configuration_paligemma.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4196f87c14e1963ff7872f6a19d80ea4ddfbab52
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/paligemma/__pycache__/configuration_paligemma.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/paligemma/modeling_paligemma.py b/janus/lib/python3.10/site-packages/transformers/models/paligemma/modeling_paligemma.py
new file mode 100644
index 0000000000000000000000000000000000000000..f6b8f2ac46ec249c0eaad4fd72f241941e4dbe2e
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/paligemma/modeling_paligemma.py
@@ -0,0 +1,623 @@
+# coding=utf-8
+# Copyright 2024 the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch PaliGemmamodel."""
+
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ...cache_utils import Cache, HybridCache, StaticCache
+from ...generation import GenerationMixin
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_flash_attn_2_available,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_paligemma import PaliGemmaConfig
+
+
+if is_flash_attn_2_available():
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
+
+from ..auto import AutoModel, AutoModelForCausalLM
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "PaliGemmaConfig"
+
+
+# Adapted from transformers.models.llama.modeling_llama.LlamaModel._prepare_4d_causal_attention_mask_with_cache_position
+# But PaliGemma has no causal mask on the prefix
+def _prepare_4d_causal_attention_mask_with_cache_position(
+ attention_mask: torch.Tensor,
+ sequence_length: int,
+ target_length: int,
+ dtype: torch.dtype,
+ device: torch.device,
+ min_dtype: float,
+ cache_position: torch.Tensor,
+ batch_size: int,
+ is_training: bool = False,
+ token_type_ids: torch.Tensor = None,
+ **kwargs,
+):
+ """
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
+
+ Args:
+ attention_mask (`torch.Tensor`):
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
+ sequence_length (`int`):
+ The sequence length being processed.
+ target_length (`int`):
+ The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
+ dtype (`torch.dtype`):
+ The dtype to use for the 4D attention mask.
+ device (`torch.device`):
+            The device to place the 4D attention mask on.
+ min_dtype (`float`):
+ The minimum value representable with the dtype `dtype`.
+ cache_position (`torch.Tensor`):
+ Indices depicting the position of the input sequence tokens in the sequence.
+ batch_size (`torch.Tensor`):
+ Batch size.
+ is_training (`bool`):
+            Whether the model is in training mode or in inference mode, determined by the presence or absence of `token_type_ids`/`labels`.
+ """
+ if attention_mask is not None and attention_mask.dim() == 4:
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
+ causal_mask = attention_mask
+ else:
+ causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
+ # Causal diagonal mask only if training, otherwise attend to the whole prefix. Training-specific attn for prefix is handled below
+ if sequence_length != 1:
+ if is_training:
+ causal_mask = torch.triu(causal_mask, diagonal=1)
+ else:
+ causal_mask[:, :sequence_length] = 0.0
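+            # roughly: at inference time the whole prefix (image tokens + text prompt) attends bidirectionally,
+            # while during training a standard upper-triangular causal mask is built first and the prefix
+            # positions are re-opened further down using `token_type_ids`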
+
+ causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
+ if attention_mask is not None:
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
+ mask_length = attention_mask.shape[-1]
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
+ padding_mask = padding_mask == 0
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
+ padding_mask, min_dtype
+ )
+ # we are training thus we need to create a full mask on the image + prefix but causal on suffix
+ if is_training:
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
+ token_type_ids[:, None, None, :].to(causal_mask.device) == 0, 0
+ )
+ return causal_mask
+
+
+@dataclass
+class PaliGemmaCausalLMOutputWithPast(ModelOutput):
+ """
+    Base class for PaliGemma causal language model (or autoregressive) outputs.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss (for next-token prediction).
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.text_config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
+ `past_key_values` input) to speed up sequential decoding.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ image_hidden_states (`torch.FloatTensor`, *optional*):
+ A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
+ image_hidden_states of the model produced by the vision encoder after projecting last hidden state.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ past_key_values: Optional[Union[List[torch.FloatTensor], Cache]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ image_hidden_states: Optional[torch.FloatTensor] = None
+
+
+class PaliGemmaMultiModalProjector(nn.Module):
+ def __init__(self, config: PaliGemmaConfig):
+ super().__init__()
+ self.linear = nn.Linear(config.vision_config.hidden_size, config.vision_config.projection_dim, bias=True)
+
+ def forward(self, image_features):
+ hidden_states = self.linear(image_features)
+
+ return hidden_states
+
+
+PALIGEMMA_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`PaliGemmaConfig`] or [`PaliGemmaVisionConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
+ PALIGEMMA_START_DOCSTRING,
+)
+class PaliGemmaPreTrainedModel(PreTrainedModel):
+ config_class = PaliGemmaConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["PaliGemmaMultiModalProjector"]
+ _skip_keys_device_placement = "past_key_values"
+ _supports_cache_class = True
+ _supports_quantized_cache = True
+ _supports_static_cache = True
+ _supports_flash_attn_2 = True
+ _supports_sdpa = True
+
+ def _init_weights(self, module):
+ # important: this ported version of PaliGemma isn't meant for training from scratch - only
+ # inference and fine-tuning
+ std = (
+ self.config.initializer_range
+ if hasattr(self.config, "initializer_range")
+ else self.config.text_config.initializer_range
+ )
+
+ if hasattr(module, "class_embedding"):
+ module.class_embedding.data.normal_(mean=0.0, std=std)
+
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+
+PALIGEMMA_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
+ The tensors corresponding to the input images. Pixel values can be obtained using
+ [`AutoImageProcessor`]. See [`SiglipImageProcessor.__call__`] for details ([`PaliGemmaProcessor`] uses
+ [`SiglipImageProcessor`] for processing images).
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+ information on the default strategy.
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
+ Indices depicting the position of the input sequence tokens in the sequence. Contrary to `position_ids`,
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
+ the complete sequence length.
+"""
+
+
+@add_start_docstrings(
+ """The PALIGEMMA model which consists of a vision backbone and a language model.""",
+ PALIGEMMA_START_DOCSTRING,
+)
+class PaliGemmaForConditionalGeneration(PaliGemmaPreTrainedModel, GenerationMixin):
+ def __init__(self, config: PaliGemmaConfig):
+ super().__init__(config)
+ self.vision_tower = AutoModel.from_config(config=config.vision_config)
+ self.multi_modal_projector = PaliGemmaMultiModalProjector(config)
+ self.vocab_size = config.text_config.vocab_size
+
+ language_model = AutoModelForCausalLM.from_config(config=config.text_config)
+
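+ # Re-prefix the language model's tied-weight keys with "language_model." so that weight tying is still detected on the composite model.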
+ if language_model._tied_weights_keys is not None:
+ self._tied_weights_keys = [f"language_model.{k}" for k in language_model._tied_weights_keys]
+ self.language_model = language_model
+
+ self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1
+ self.post_init()
+
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.get_input_embeddings with Llava->PaliGemma
+ def get_input_embeddings(self):
+ return self.language_model.get_input_embeddings()
+
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.set_input_embeddings with Llava->PaliGemma
+ def set_input_embeddings(self, value):
+ self.language_model.set_input_embeddings(value)
+
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.get_output_embeddings with Llava->PaliGemma
+ def get_output_embeddings(self):
+ return self.language_model.get_output_embeddings()
+
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.set_output_embeddings with Llava->PaliGemma
+ def set_output_embeddings(self, new_embeddings):
+ self.language_model.set_output_embeddings(new_embeddings)
+
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.set_decoder with Llava->PaliGemma
+ def set_decoder(self, decoder):
+ self.language_model.set_decoder(decoder)
+
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.get_decoder with Llava->PaliGemma
+ def get_decoder(self):
+ return self.language_model.get_decoder()
+
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.tie_weights with Llava->PaliGemma
+ def tie_weights(self):
+ return self.language_model.tie_weights()
+
+ def _update_causal_mask(
+ self,
+ attention_mask,
+ token_type_ids,
+ past_key_values,
+ cache_position,
+ input_ids=None,
+ inputs_embeds=None,
+ is_training: bool = False,
+ ):
+ if self.config.text_config._attn_implementation == "flash_attention_2":
+ if attention_mask is not None and 0.0 in attention_mask:
+ return attention_mask
+ return None
+
+ using_static_cache = isinstance(past_key_values, StaticCache)
+ min_dtype = torch.finfo(self.dtype).min
+ inputs_lead_dim = input_ids.shape[0] if input_ids is not None else inputs_embeds.shape[0]
+ sequence_length = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+ if using_static_cache:
+ target_length = past_key_values.get_max_cache_shape()
+ elif isinstance(past_key_values, HybridCache):
+ target_length = past_key_values.get_max_cache_shape()
+ else:
+ target_length = (
+ attention_mask.shape[-1]
+ if isinstance(attention_mask, torch.Tensor)
+ else cache_position[0] + sequence_length + 1
+ )
+
+ if attention_mask is not None and attention_mask.dim() == 4:
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
+ return attention_mask
+
+ causal_mask = torch.full(
+ (sequence_length, target_length), fill_value=min_dtype, dtype=self.dtype, device=cache_position.device
+ )
+ # Causal diagonal mask only if training, otherwise attend to the whole prefix. Training-specific attn for prefix is handled below
+ if sequence_length != 1:
+ if is_training:
+ causal_mask = torch.triu(causal_mask, diagonal=1)
+ else:
+ causal_mask[:, :sequence_length] = 0.0
+
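+ # Keep min_dtype (masked) for positions beyond cache_position; zero out earlier positions so they remain attendable.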
+ causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
+ causal_mask = causal_mask[None, None, :, :].expand(inputs_lead_dim, 1, -1, -1)
+ if attention_mask is not None:
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
+ mask_length = attention_mask.shape[-1]
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
+ padding_mask = padding_mask == 0
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
+ padding_mask, min_dtype
+ )
+ # we are training thus we need to create a full mask on the image + prefix but causal on suffix
+ if is_training:
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
+ token_type_ids[:, None, None, :].to(causal_mask.device) == 0, 0
+ )
+ return causal_mask
+
+ def get_image_features(self, pixel_values: torch.FloatTensor):
+ """
+ Obtains image last hidden states from the vision tower and applies the multimodal projection.
+
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`):
+ The tensors corresponding to the input images.
+ Returns:
+ image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`.
+ """
+ image_outputs = self.vision_tower(pixel_values)
+ selected_image_feature = image_outputs.last_hidden_state
+ image_features = self.multi_modal_projector(selected_image_feature)
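+ # Scale image features down by sqrt(hidden_size) so that, after the language model's own embedding scaling, image tokens end up roughly on the same scale as text tokens.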
+ image_features = image_features / (self.config.text_config.hidden_size**0.5)
+ return image_features
+
+ @add_start_docstrings_to_model_forward(PALIGEMMA_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=PaliGemmaCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ pixel_values: torch.FloatTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Union[List[torch.FloatTensor], Cache]] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ num_logits_to_keep: int = 0,
+ ) -> Union[Tuple, PaliGemmaCausalLMOutputWithPast]:
+ r"""
+ Args:
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.text_config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.text_config.vocab_size]`.
+
+ num_logits_to_keep (`int`, *optional*):
+ Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
+ `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
+ token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, PaliGemmaForConditionalGeneration
+
+ >>> model = PaliGemmaForConditionalGeneration.from_pretrained("google/PaliGemma-test-224px-hf")
+ >>> processor = AutoProcessor.from_pretrained("google/PaliGemma-test-224px-hf")
+
+ >>> prompt = "answer en Where is the cow standing?"
+ >>> url = "https://huggingface.co/gv-hf/PaliGemma-test-224px-hf/resolve/main/cow_beach_1.png"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(images=image, text=prompt, return_tensors="pt")
+
+ >>> # Generate
+ >>> generate_ids = model.generate(**inputs, max_length=30)
+ >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ "answer en Where is the cow standing?\nbeach"
+ ```"""
+
+ if (input_ids is None) ^ (inputs_embeds is not None):
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
+
+ if pixel_values is not None and inputs_embeds is not None:
+ raise ValueError(
+ "You cannot specify both pixel_values and inputs_embeds at the same time, and must specify either one"
+ )
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ is_training = token_type_ids is not None and labels is not None
+
+ if inputs_embeds is None:
+ inputs_embeds = self.get_input_embeddings()(input_ids)
+
+ if cache_position is None:
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+ cache_position = torch.arange(
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+ )
+
+ if position_ids is None:
+ position_ids = cache_position.unsqueeze(0) + 1 # Paligemma positions are 1-indexed
+
+ # Merge text and images
+ if pixel_values is not None:
+ image_features = self.get_image_features(pixel_values)
+
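+ # Replace the placeholder image-token embeddings in the text sequence with the projected image features (one text position per image patch token).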
+ special_image_mask = (input_ids == self.config.image_token_index).unsqueeze(-1)
+ special_image_mask = special_image_mask.expand_as(inputs_embeds).to(inputs_embeds.device)
+ if inputs_embeds[special_image_mask].numel() != image_features.numel():
+ image_tokens_in_text = torch.sum(input_ids == self.config.image_token_index)
+ raise ValueError(
+ f"Number of images does not match number of special image tokens in the input text. "
+ f"Got {image_tokens_in_text} image tokens in the text but {image_features.shape[0] * image_features.shape[1]} "
+ "tokens from image embeddings."
+ )
+ image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
+ inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
+
+ # mask out pad-token-ids in labels for BC
+ if labels is not None and self.pad_token_id in labels:
+ logger.warning_once(
+ "`labels` contains `pad_token_id` which will be masked with `config.ignore_index`. "
+ "You have to mask out `pad_token_id` when preparing `labels`, this behavior will be removed in v.4.46.",
+ )
+ labels = torch.where(input_ids == self.pad_token_id, self.config.ignore_index, labels)
+
+ causal_mask = self._update_causal_mask(
+ attention_mask, token_type_ids, past_key_values, cache_position, input_ids, inputs_embeds, is_training
+ )
+ outputs = self.language_model(
+ attention_mask=causal_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ cache_position=cache_position,
+ num_logits_to_keep=num_logits_to_keep,
+ )
+
+ logits = outputs.logits
+ loss = None
+ if labels is not None:
+ # Upcast to float if we need to compute the loss to avoid potential precision issues
+ logits = logits.float()
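+ # Next-token prediction: logits at position i are scored against the label at position i + 1, so drop the last logit and the first label.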
+ shift_logits = logits[..., :-1, :]
+ shift_labels = labels[..., 1:]
+ if attention_mask is not None:
+ # we use the input attention mask to shift the logits and labels, because it is 2D.
+ # we also crop attn mask in case it is longer, which happens in PrefixTuning with peft
+ shift_attention_mask = attention_mask[:, -shift_logits.shape[1] :].to(logits.device)
+ shift_logits = shift_logits[shift_attention_mask.to(logits.device) != 0].contiguous()
+ shift_labels = shift_labels[shift_attention_mask.to(shift_labels.device) != 0].contiguous()
+ else:
+ shift_logits = shift_logits.contiguous()
+ shift_labels = shift_labels.contiguous()
+ # Flatten the tokens
+ loss_fct = nn.CrossEntropyLoss()
+
+ flat_logits = shift_logits.view(-1, self.config.text_config.vocab_size)
+ flat_labels = shift_labels.view(-1).to(shift_logits.device)
+ loss = loss_fct(flat_logits, flat_labels)
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return PaliGemmaCausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ image_hidden_states=image_features if pixel_values is not None else None,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ input_ids,
+ past_key_values=None,
+ inputs_embeds=None,
+ cache_position=None,
+ position_ids=None,
+ pixel_values=None,
+ attention_mask=None,
+ token_type_ids=None,
+ use_cache=True,
+ num_logits_to_keep=None,
+ labels=None,
+ **kwargs,
+ ):
+ # Overwritten -- custom `position_ids` and `pixel_values` handling
+ model_inputs = self.language_model.prepare_inputs_for_generation(
+ input_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ cache_position=cache_position,
+ use_cache=use_cache,
+ num_logits_to_keep=num_logits_to_keep,
+ token_type_ids=token_type_ids,
+ **kwargs,
+ )
+
+ # position_ids in Paligemma are 1-indexed
+ if model_inputs.get("position_ids") is not None:
+ model_inputs["position_ids"] += 1
+ # If we're in cached decoding stage, pixel values should be None because input ids do not contain special image token anymore
+ # Otherwise we need pixel values to be passed to model. NOTE: use_cache=False needs pixel_values always
+ if cache_position[0] == 0:
+ model_inputs["pixel_values"] = pixel_values
+ is_training = token_type_ids is not None and labels is not None
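+ # A HybridCache has a fixed (static) length, so the full 4D causal mask is precomputed here at the prefill step and handed to the language model.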
+ if cache_position[0] == 0 and isinstance(past_key_values, HybridCache):
+ causal_mask = self._update_causal_mask(
+ attention_mask, token_type_ids, past_key_values, cache_position, input_ids, inputs_embeds, is_training
+ )
+ model_inputs["attention_mask"] = causal_mask
+ return model_inputs
+
+
+__all__ = ["PaliGemmaForConditionalGeneration", "PaliGemmaPreTrainedModel"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/qwen2/__pycache__/configuration_qwen2.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/qwen2/__pycache__/configuration_qwen2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..60b218b40bf66d24abb56bdc287671d170c6cde0
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/qwen2/__pycache__/configuration_qwen2.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/rag/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/rag/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8b0b67aa1c283f95fcff89a67d3f251b18665f6e
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/rag/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/rag/modeling_rag.py b/janus/lib/python3.10/site-packages/transformers/models/rag/modeling_rag.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3ca787691c4af7ebb04376707572b8df21889ac
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/rag/modeling_rag.py
@@ -0,0 +1,1644 @@
+# coding=utf-8
+# Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""RAG model implementation."""
+
+import copy
+from dataclasses import dataclass
+from typing import Callable, List, Optional, Tuple, Union
+
+import torch
+from torch import nn
+
+from ...configuration_utils import PretrainedConfig
+from ...generation import BeamSearchScorer, GenerationConfig, LogitsProcessorList, StoppingCriteriaList
+from ...modeling_outputs import ModelOutput
+from ...modeling_utils import PreTrainedModel
+from ...utils import add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_rag import RagConfig
+from .retrieval_rag import RagRetriever
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "RagConfig"
+
+
+@dataclass
+class RetrievAugLMMarginOutput(ModelOutput):
+ """
+ Base class for retriever augmented marginalized models outputs.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head. The score is possibly marginalized over all documents for
+ each vocabulary token.
+ doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
+ Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
+ `question_encoder_last_hidden_state`.
+ past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
+ num_heads, sequence_length, embed_size_per_head)`).
+
+ Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used
+ (see `past_key_values` input) to speed up sequential decoding.
+ retrieved_doc_embeds (`torch.FloatTensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*):
+ Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute
+ the `doc_scores`.
+ retrieved_doc_ids (`torch.LongTensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*):
+ The indexes of the embedded documents retrieved by the retriever.
+ context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.
+ context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
+ retriever.
+ question_encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden states at the output of the last layer of the question encoder, corresponding to the pooled
+ output of the model.
+ question_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.
+ question_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the question encoder, after the attention softmax, used to compute the weighted
+ average in the self-attention heads.
+ generator_enc_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the generator encoder of the model.
+ generator_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.
+ generator_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted
+ average in the self-attention heads.
+ generator_dec_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.
+ generator_dec_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted
+ average in the self-attention heads.
+ generator_cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ doc_scores: torch.FloatTensor = None
+ past_key_values: Optional[List[torch.FloatTensor]] = None
+ retrieved_doc_embeds: Optional[torch.FloatTensor] = None
+ retrieved_doc_ids: Optional[torch.LongTensor] = None
+ context_input_ids: Optional[torch.LongTensor] = None
+ context_attention_mask: Optional[torch.LongTensor] = None
+ question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ question_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ question_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None
+ generator_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ generator_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ generator_dec_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ generator_dec_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ generator_cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class RetrievAugLMOutput(ModelOutput):
+ """
+ Args:
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head. The score is possibly marginalized over all documents for
+ each vocabulary token.
+ doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
+ Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
+ `question_encoder_last_hidden_state`.
+ past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
+ num_heads, sequence_length, embed_size_per_head)`).
+
+ Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used
+ (see `past_key_values` input) to speed up sequential decoding.
+ retrieved_doc_embeds (`torch.FloatTensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*):
+ Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute
+ the `doc_scores`.
+ retrieved_doc_ids (`torch.LongTensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*):
+ The indexes of the embedded documents retrieved by the retriever.
+ context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.
+ context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
+ retriever.
+ question_encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden states at the output of the last layer of the question encoder, corresponding to the pooled
+ output of the model.
+ question_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.
+ question_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the question encoder, after the attention softmax, used to compute the weighted
+ average in the self-attention heads.
+ generator_enc_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the generator encoder of the model.
+ generator_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.
+ generator_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted
+ average in the self-attention heads.
+ generator_dec_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.
+ generator_dec_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted
+ average in the self-attention heads.
+ generator_cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ """
+
+ logits: torch.FloatTensor = None
+ doc_scores: torch.FloatTensor = None
+ past_key_values: Optional[List[torch.FloatTensor]] = None
+ retrieved_doc_embeds: Optional[torch.FloatTensor] = None
+ retrieved_doc_ids: Optional[torch.LongTensor] = None
+ context_input_ids: Optional[torch.LongTensor] = None
+ context_attention_mask: Optional[torch.LongTensor] = None
+ question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ question_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ question_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None
+ generator_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ generator_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ generator_dec_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ generator_dec_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ generator_cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+class RagPreTrainedModel(PreTrainedModel):
+ r"""
+ RAG models were released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP
+ Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al.
+
+ RAG is a retrieval-augmented model that encapsulates three components: a question encoder, a dataset retriever and a
+ generator. The encoder and generator are trainable, while the retriever is just an indexed dataset.
+
+ """
+
+ config_class = RagConfig
+ base_model_prefix = "rag"
+ _supports_flash_attn_2 = True
+ _supports_sdpa = True
+
+ @classmethod
+ def from_pretrained(cls, *args, **kwargs):
+ # At the moment fast initialization is not supported
+ # for composite models
+ kwargs["_fast_init"] = False
+ return super().from_pretrained(*args, **kwargs)
+
+ @classmethod
+ def from_pretrained_question_encoder_generator(
+ cls,
+ question_encoder_pretrained_model_name_or_path: str = None,
+ generator_pretrained_model_name_or_path: str = None,
+ retriever: RagRetriever = None,
+ **kwargs,
+ ) -> PreTrainedModel:
+ r"""
+ Instantiates a question encoder and a generator from one or two base classes of the library from pretrained
+ model checkpoints.
+
+ The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
+ the model, you need to first set it back in training mode with `model.train()`.
+
+ Params:
+ question_encoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
+ Information necessary to initiate the question encoder. Can be either:
+
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+ - A path to a *directory* containing model weights saved using
+ [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+ - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
+ this case, `from_tf` should be set to `True` and a configuration object should be provided as
+ `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
+ PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
+
+ generator_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
+ Information necessary to initiate the generator. Can be either:
+
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+ - A path to a *directory* containing model weights saved using
+ [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+ - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
+ this case, `from_tf` should be set to `True` and a configuration object should be provided as
+ `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
+ PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
+
+ model_args (remaining positional arguments, *optional*):
+ All remaining positional arguments will be passed to the underlying model's `__init__` method.
+ retriever ([`RagRetriever`], *optional*):
+ The retriever to use.
+ kwargs (remaining dictionary of keyword arguments, *optional*):
+ Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
+ `output_attentions=True`).
+
+ - To update the question_encoder configuration, use the prefix *question_encoder_* for each
+ configuration parameter.
+ - To update the generator configuration, use the prefix *generator_* for each configuration parameter.
+ - To update the parent model configuration, do not use a prefix for each configuration parameter.
+
+ Behaves differently depending on whether a `config` is provided or automatically loaded.
+
+ Example:
+
+ ```python
+ >>> from transformers import RagModel
+
+ >>> # initialize a RAG from two pretrained models.
+ >>> model = RagModel.from_pretrained_question_encoder_generator(
+ ... "facebook/dpr-question_encoder-single-nq-base", "google-t5/t5-small"
+ ... )
+ >>> # saving model after fine-tuning
+ >>> model.save_pretrained("./rag")
+ >>> # load fine-tuned model
+ >>> model = RagModel.from_pretrained("./rag")
+ ```"""
+
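+ # Split the kwargs by prefix: "question_encoder_*" arguments go to the question encoder, "generator_*" arguments to the generator, and the rest configures the joint RagConfig.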
+ kwargs_question_encoder = {
+ argument[len("question_encoder_") :]: value
+ for argument, value in kwargs.items()
+ if argument.startswith("question_encoder_")
+ }
+
+ kwargs_generator = {
+ argument[len("generator_") :]: value
+ for argument, value in kwargs.items()
+ if argument.startswith("generator_")
+ }
+
+ # remove question_encoder, generator kwargs from kwargs
+ for key in kwargs_question_encoder.keys():
+ del kwargs["question_encoder_" + key]
+ for key in kwargs_generator.keys():
+ del kwargs["generator_" + key]
+
+ # Load and initialize the question_encoder and generator
+ # The distinction between question_encoder and generator at the model level is made
+ # by the value of the flag `is_generator` that we need to set correctly.
+ question_encoder = kwargs_question_encoder.pop("model", None)
+ if question_encoder is None:
+ assert question_encoder_pretrained_model_name_or_path is not None, (
+ "If `model` is not defined as an argument, a `question_encoder_pretrained_model_name_or_path` has to"
+ " be defined"
+ )
+ from ..auto.modeling_auto import AutoModel
+
+ if "config" not in kwargs_question_encoder:
+ from ..auto.configuration_auto import AutoConfig
+
+ question_encoder_config, kwargs_question_encoder = AutoConfig.from_pretrained(
+ question_encoder_pretrained_model_name_or_path,
+ **kwargs_question_encoder,
+ return_unused_kwargs=True,
+ )
+ kwargs_question_encoder["config"] = question_encoder_config
+
+ question_encoder = AutoModel.from_pretrained(
+ question_encoder_pretrained_model_name_or_path, **kwargs_question_encoder
+ )
+
+ generator = kwargs_generator.pop("model", None)
+ if generator is None:
+ assert generator_pretrained_model_name_or_path is not None, (
+ "If `generator_model` is not defined as an argument, a `generator_pretrained_model_name_or_path` has"
+ " to be defined"
+ )
+ from ..auto.modeling_auto import AutoModelForSeq2SeqLM
+
+ if "config" not in kwargs_generator:
+ from ..auto.configuration_auto import AutoConfig
+
+ generator_config, kwargs_generator = AutoConfig.from_pretrained(
+ generator_pretrained_model_name_or_path, **kwargs_generator, return_unused_kwargs=True
+ )
+
+ kwargs_generator["config"] = generator_config
+
+ generator = AutoModelForSeq2SeqLM.from_pretrained(
+ generator_pretrained_model_name_or_path, **kwargs_generator
+ )
+
+ # instantiate config with corresponding kwargs
+ config = kwargs.get("config", None)
+ if config is None:
+ config = RagConfig.from_question_encoder_generator_configs(
+ question_encoder.config, generator.config, **kwargs
+ )
+
+ return cls(question_encoder=question_encoder, generator=generator, config=config, retriever=retriever)
+
+
+RAG_START_DOCSTRING = r"""
+
+ RAG is a seq2seq model which encapsulates two core components: a question encoder and a generator. During a forward
+ pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context
+ documents. The documents are then prepended to the input, and the contextualized inputs are passed to the generator.
+
+ The question encoder can be any *autoencoding* model, preferably [`DPRQuestionEncoder`], and the generator can be
+ any *seq2seq* model, preferably [`BartForConditionalGeneration`].
+
+ The model can be initialized with a [`RagRetriever`] for end-to-end generation or used in combination with the
+ outputs of a retriever in multiple steps (see examples for more details). The model is compatible with any
+ *autoencoding* model as the `question_encoder` and any *seq2seq* model with a language model head as the `generator`.
+ It has been tested with [`DPRQuestionEncoder`] as the `question_encoder` and [`BartForConditionalGeneration`] or
+ [`T5ForConditionalGeneration`] as the `generator`.
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+
+ Args:
+ config ([`RagConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+ question_encoder ([`PreTrainedModel`]):
+ An encoder model compatible with the faiss index encapsulated by the `retriever`.
+ generator ([`PreTrainedModel`]):
+ A seq2seq model used as the generator in the RAG architecture.
+ retriever ([`RagRetriever`]):
+ A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.
+"""
+
+
+RAG_FORWARD_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. [`RagConfig`], used to initialize the model, specifies
+ which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to
+ obtain the indices.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
+ Tuple consists of (`generator_enc_last_hidden_state`, *optional*: `generator_enc_hidden_states`,
+ *optional*: `generator_enc_attentions`). `generator_enc_last_hidden_state` of shape `(batch_size, n_docs *
+ sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the
+ generator's encoder.
+
+ Used by the ([`RagModel`]) model during decoding.
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Provide for generation tasks. `None` by default, construct as per instructions for the generator model
+ you're using with your RAG instance.
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`):
+ Tuple consists of two elements: `encoder_outputs` of the RAG model (see `encoder_outputs`) and
+ `past_key_values` of the underlying generator. Can be used to speed up decoding. `past_key_values` are used
+ in the ([`RagTokenForGeneration`]) model during decoding.
+ doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
+ Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
+ `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever`, `doc_scores`
+ has to be provided to the forward pass. `doc_scores` can be computed via
+ `question_encoder_last_hidden_state` and `retrieved_doc_embeds`, see examples for more information.
+ context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
+ retriever. If the model was not initialized with a `retriever`, `context_input_ids` has to be provided to
+ the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
+ context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`,*optional*, returned when *output_retrieved=True*):
+ Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
+ retriever. If the model is not initialized with a `retriever`, `context_attention_mask` has to be
+ provided to the forward pass. `context_attention_mask` are returned by [`~RagRetriever.__call__`].
+ use_cache (`bool`, *optional*, defaults to `True`):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ output_retrieved (`bool`, *optional*):
+ Whether or not to return the `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
+ `context_attention_mask`. See returned tensors for more detail.
+ n_docs (`int`, *optional*, defaults to `config.n_docs`):
+ Number of documents to retrieve and/or number of documents for which to generate an answer.
+"""
+
+
+@add_start_docstrings_to_model_forward(RAG_START_DOCSTRING)
+class RagModel(RagPreTrainedModel):
+ def __init__(
+ self,
+ config: Optional[PretrainedConfig] = None,
+ question_encoder: Optional[PreTrainedModel] = None,
+ generator: Optional[PreTrainedModel] = None,
+ retriever: Optional[RagRetriever] = None, # or maybe just use a `set_retriever(...)` method
+ **kwargs,
+ ):
+ assert config is not None or (
+ question_encoder is not None and generator is not None
+ ), "Either a configuration or an question_encoder and a generator has to be provided."
+
+ if config is None:
+ config = RagConfig.from_question_encoder_generator_configs(
+ question_encoder.config, generator.config, **kwargs
+ )
+ else:
+ assert isinstance(config, self.config_class), f"config: {config} has to be of type {self.config_class}"
+ super().__init__(config)
+ if question_encoder is None:
+ from ..auto.modeling_auto import AutoModel
+
+ question_encoder = AutoModel.from_config(config.question_encoder)
+
+ if generator is None:
+ from ..auto.modeling_auto import AutoModelForSeq2SeqLM
+
+ generator = AutoModelForSeq2SeqLM.from_config(config.generator)
+
+ self.retriever = retriever
+ if self.retriever is not None:
+ assert isinstance(
+ retriever, RagRetriever
+ ), f"`self.retriever` is of type {type(self.retriever)}, but should be of type `RagRetriever`"
+ self.retriever = retriever
+
+ self.question_encoder = question_encoder
+ self.generator = generator
+
+ self.ctx_encoder = None
+ self.context_encoder_training = False
+
+ @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=RetrievAugLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ doc_scores: Optional[torch.FloatTensor] = None,
+ context_input_ids: Optional[torch.LongTensor] = None,
+ context_attention_mask: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_retrieved: Optional[bool] = None,
+ n_docs: Optional[int] = None,
+ ) -> Union[Tuple[torch.Tensor], RetrievAugLMOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, RagRetriever, RagModel
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-base")
+ >>> retriever = RagRetriever.from_pretrained(
+ ... "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True
+ ... )
+ >>> # initialize with RagRetriever to do everything in one forward call
+ >>> model = RagModel.from_pretrained("facebook/rag-token-base", retriever=retriever)
+
+ >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
+ >>> outputs = model(input_ids=inputs["input_ids"])
+ ```"""
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ output_retrieved = output_retrieved if output_retrieved is not None else self.config.output_retrieved
+
+ # whether retriever has to be used
+ has_to_retrieve = (
+ self.retriever is not None
+ and (context_input_ids is None or context_attention_mask is None or doc_scores is None)
+ and encoder_outputs is None
+ )
+ # encoder_outputs are pre-computed during RAG-token generation
+ if encoder_outputs is None:
+ if has_to_retrieve:
+ question_enc_outputs = self.question_encoder(
+ input_ids, attention_mask=attention_mask, return_dict=True
+ )
+ question_encoder_last_hidden_state = question_enc_outputs[0] # hidden states of question encoder
+
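+ # The retriever expects the question embeddings as a CPU float32 numpy array (e.g. for a faiss index lookup), so detach and convert before querying.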
+ retriever_outputs = self.retriever(
+ input_ids,
+ question_encoder_last_hidden_state.cpu().detach().to(torch.float32).numpy(),
+ prefix=self.generator.config.prefix,
+ n_docs=n_docs,
+ return_tensors="pt",
+ )
+ if self.context_encoder_training:
+ (
+ context_input_ids,
+ context_attention_mask,
+ retrieved_doc_embeds,
+ retrieved_doc_input_ids,
+ retrieved_doc_attention_mask,
+ retrieved_doc_ids,
+ ) = (
+ retriever_outputs["context_input_ids"],
+ retriever_outputs["context_attention_mask"],
+ retriever_outputs["retrieved_doc_embeds"],
+ retriever_outputs["tokenized_doc_ids"],
+ retriever_outputs["tokenized_doc_attention_mask"],
+ retriever_outputs["doc_ids"],
+ )
+
+ context_input_ids = context_input_ids.to(input_ids)
+ context_attention_mask = context_attention_mask.to(input_ids)
+
+ retrieved_doc_input_ids = retrieved_doc_input_ids.to(input_ids)
+ retrieved_doc_attention_mask = retrieved_doc_attention_mask.to(input_ids)
+ retrieved_doc_embeds = self.ctx_encoder(
+ retrieved_doc_input_ids, attention_mask=retrieved_doc_attention_mask, return_dict=True
+ ).pooler_output
+ retrieved_doc_embeds = retrieved_doc_embeds.view(
+ -1, n_docs, question_encoder_last_hidden_state.shape[1]
+ ) # reshaping
+
+ # compute doc_scores involving ctx_encoder
+ doc_scores = torch.bmm(
+ question_encoder_last_hidden_state.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)
+ ).squeeze(1)
+
+ else:
+ context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_ids = (
+ retriever_outputs["context_input_ids"],
+ retriever_outputs["context_attention_mask"],
+ retriever_outputs["retrieved_doc_embeds"],
+ retriever_outputs["doc_ids"],
+ )
+
+ # set to correct device
+ retrieved_doc_embeds = retrieved_doc_embeds.to(question_encoder_last_hidden_state)
+ context_input_ids = context_input_ids.to(input_ids)
+ context_attention_mask = context_attention_mask.to(input_ids)
+
+ # compute doc_scores
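+ # Shape sketch: question_encoder_last_hidden_state is (batch_size, hidden_size) and
+ # retrieved_doc_embeds is (batch_size, n_docs, hidden_size); the bmm of
+ # (batch_size, 1, hidden_size) with (batch_size, hidden_size, n_docs) gives (batch_size, 1, n_docs),
+ # and squeeze(1) leaves doc_scores with shape (batch_size, n_docs).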
+ doc_scores = torch.bmm(
+ question_encoder_last_hidden_state.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)
+ ).squeeze(1)
+ else:
+ assert context_input_ids is not None, (
+ "Make sure that `context_input_ids` are passed, if no `retriever` is set. Alternatively, you can"
+ " set a retriever using the `set_retriever(...)` function."
+ )
+ assert context_attention_mask is not None, (
+ "Make sure that `context_attention_mask` are passed, if no `retriever` is set. Alternatively, you"
+ " can set a retriever using the `set_retriever(...)` function."
+ )
+ assert doc_scores is not None, (
+ "Make sure that `doc_scores` are passed, if no `retriever` is set. Alternatively, you can set a"
+ " retriever using the `set_retriever(...)` function."
+ )
+
+ assert (
+ doc_scores is not None
+ ), "Make sure that `doc_scores` are passed when passing `encoder_outputs` to the forward function."
+
+ assert (doc_scores.shape[1] % n_docs) == 0, (
+ f"The second dimension of `doc_scores` should be a multiple of `n_docs`={n_docs}, but is"
+ f" {doc_scores.shape[1]}."
+ )
+
+ # Decoder inputs do not depend on the retrieved documents, so repeat them n_docs times to line up with `context_input_ids`
+ if decoder_input_ids is not None:
+ decoder_input_ids = decoder_input_ids.repeat_interleave(n_docs, dim=0)
+
+ if decoder_attention_mask is not None:
+ decoder_attention_mask = decoder_attention_mask.repeat_interleave(n_docs, dim=0)
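+ # Example of the repeat above: with a batch of 2 target sequences and n_docs=5,
+ # repeat_interleave(n_docs, dim=0) turns shape (2, tgt_len) into (10, tgt_len), ordered as
+ # [seq0 x 5, seq1 x 5], matching the per-document rows of `context_input_ids`.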
+
+ gen_outputs = self.generator(
+ input_ids=context_input_ids,
+ attention_mask=context_attention_mask,
+ encoder_outputs=encoder_outputs,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ return_dict=True,
+ )
+
+ if not has_to_retrieve:
+ question_encoder_last_hidden_state = None
+ question_enc_hidden_states = None
+ question_enc_attentions = None
+ retrieved_doc_embeds = None
+ retrieved_doc_ids = None
+ else:
+ question_enc_hidden_states = question_enc_outputs.hidden_states
+ question_enc_attentions = question_enc_outputs.attentions
+
+ if not has_to_retrieve or not output_retrieved:
+ # don't output retrieved docs
+ context_input_ids = None
+ context_attention_mask = None
+ retrieved_doc_embeds = None
+ retrieved_doc_ids = None
+
+ return RetrievAugLMOutput(
+ logits=gen_outputs.logits,
+ doc_scores=doc_scores,
+ past_key_values=gen_outputs.past_key_values,
+ context_input_ids=context_input_ids,
+ context_attention_mask=context_attention_mask,
+ retrieved_doc_embeds=retrieved_doc_embeds,
+ retrieved_doc_ids=retrieved_doc_ids,
+ question_encoder_last_hidden_state=question_encoder_last_hidden_state,
+ question_enc_hidden_states=question_enc_hidden_states,
+ question_enc_attentions=question_enc_attentions,
+ generator_enc_last_hidden_state=gen_outputs.encoder_last_hidden_state,
+ generator_enc_hidden_states=gen_outputs.encoder_hidden_states,
+ generator_enc_attentions=gen_outputs.encoder_attentions,
+ generator_dec_hidden_states=gen_outputs.decoder_hidden_states,
+ generator_dec_attentions=gen_outputs.decoder_attentions,
+ generator_cross_attentions=gen_outputs.cross_attentions,
+ )
+
+
+@add_start_docstrings_to_model_forward(
+ """
+ A RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass.
+ """,
+ RAG_START_DOCSTRING,
+)
+class RagSequenceForGeneration(RagPreTrainedModel):
+ def __init__(
+ self,
+ config: Optional[PretrainedConfig] = None,
+ question_encoder: Optional[PreTrainedModel] = None,
+ generator: Optional[PreTrainedModel] = None,
+ retriever: Optional[RagRetriever] = None,
+ **kwargs,
+ ):
+ assert config is not None or (
+ question_encoder is not None and generator is not None
+ ), "Either a configuration or an encoder and a generator has to be provided."
+
+ if config is None:
+ config = RagConfig.from_question_encoder_generator_configs(
+ question_encoder.config, generator.config, **kwargs
+ )
+ super().__init__(config)
+
+ # instantiate model
+ self.rag = RagModel(config=config, question_encoder=question_encoder, generator=generator, retriever=retriever)
+
+ def set_retriever(self, retriever: RagRetriever):
+ self.rag.retriever = retriever
+
+ def set_context_encoder_for_training(self, ctx_encoder: PreTrainedModel):
+ self.rag.context_encoder_training = True
+ self.rag.ctx_encoder = ctx_encoder
+
+ @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=RetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ context_input_ids: Optional[torch.LongTensor] = None,
+ context_attention_mask: Optional[torch.LongTensor] = None,
+ doc_scores: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_retrieved: Optional[bool] = None,
+ exclude_bos_score: Optional[bool] = None,
+ reduce_loss: Optional[bool] = None,
+ labels: Optional[torch.LongTensor] = None,
+ n_docs: Optional[int] = None,
+ **kwargs, # needs kwargs for generation
+ ) -> RetrievAugLMMarginOutput:
+ r"""
+ exclude_bos_score (`bool`, *optional*):
+ Only relevant if `labels` is passed. If `True`, the score of the BOS token is disregarded when computing
+ the loss.
+ reduce_loss (`bool`, *optional*):
+ Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `torch.Tensor.sum`
+ operation.
+ kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
+ Legacy dictionary, which is required so that the model can use the *generate()* function.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, RagRetriever, RagSequenceForGeneration
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq")
+ >>> retriever = RagRetriever.from_pretrained(
+ ... "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
+ ... )
+ >>> # initialize with RagRetriever to do everything in one forward call
+ >>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever)
+
+ >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
+ >>> targets = tokenizer(text_target="In Paris, there are 10 million people.", return_tensors="pt")
+ >>> input_ids = inputs["input_ids"]
+ >>> labels = targets["input_ids"]
+ >>> outputs = model(input_ids=input_ids, labels=labels)
+
+ >>> # or use retriever separately
+ >>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", use_dummy_dataset=True)
+ >>> # 1. Encode
+ >>> question_hidden_states = model.question_encoder(input_ids)[0]
+ >>> # 2. Retrieve
+ >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt")
+ >>> doc_scores = torch.bmm(
+ ... question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2)
+ ... ).squeeze(1)
+ >>> # 3. Forward to generator
+ >>> outputs = model(
+ ... context_input_ids=docs_dict["context_input_ids"],
+ ... context_attention_mask=docs_dict["context_attention_mask"],
+ ... doc_scores=doc_scores,
+ ... decoder_input_ids=labels,
+ ... )
+ ```"""
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+ exclude_bos_score = exclude_bos_score if exclude_bos_score is not None else self.config.exclude_bos_score
+ reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss
+
+ if labels is not None:
+ if decoder_input_ids is None:
+ decoder_input_ids = labels
+ use_cache = False
+
+ outputs = self.rag(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ encoder_outputs=encoder_outputs,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ context_input_ids=context_input_ids,
+ context_attention_mask=context_attention_mask,
+ doc_scores=doc_scores,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ output_retrieved=output_retrieved,
+ n_docs=n_docs,
+ )
+
+ loss = None
+ if labels is not None:
+ loss = self.get_nll(
+ outputs.logits,
+ outputs.doc_scores,
+ decoder_input_ids,
+ reduce_loss=reduce_loss,
+ epsilon=self.config.label_smoothing,
+ exclude_bos_score=exclude_bos_score,
+ n_docs=n_docs,
+ )
+
+ return RetrievAugLMMarginOutput(
+ loss=loss,
+ logits=outputs.logits,
+ doc_scores=outputs.doc_scores,
+ past_key_values=outputs.past_key_values,
+ context_input_ids=outputs.context_input_ids,
+ context_attention_mask=outputs.context_attention_mask,
+ retrieved_doc_embeds=outputs.retrieved_doc_embeds,
+ retrieved_doc_ids=outputs.retrieved_doc_ids,
+ question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,
+ question_enc_hidden_states=outputs.question_enc_hidden_states,
+ question_enc_attentions=outputs.question_enc_attentions,
+ generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,
+ generator_enc_hidden_states=outputs.generator_enc_hidden_states,
+ generator_enc_attentions=outputs.generator_enc_attentions,
+ generator_dec_hidden_states=outputs.generator_dec_hidden_states,
+ generator_dec_attentions=outputs.generator_dec_attentions,
+ generator_cross_attentions=outputs.generator_cross_attentions,
+ )
+
+ @property
+ def retriever(self):
+ return self.rag.retriever
+
+ @property
+ def generator(self):
+ return self.rag.generator
+
+ @property
+ def question_encoder(self):
+ return self.rag.question_encoder
+
+ @torch.no_grad()
+ def generate(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ context_input_ids: Optional[torch.LongTensor] = None,
+ context_attention_mask: Optional[torch.LongTensor] = None,
+ doc_scores: Optional[torch.FloatTensor] = None,
+ do_deduplication: Optional[bool] = None, # defaults to True
+ num_return_sequences: Optional[int] = None, # defaults to 1
+ num_beams: Optional[int] = None, # defaults to 1
+ n_docs: Optional[int] = None,
+ **model_kwargs,
+ ) -> torch.LongTensor:
+ """
+ Implements RAG sequence "thorough" decoding. Read the [`~generation.GenerationMixin.generate`] documentation
+ for more information on how to set other generate input parameters.
+
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ The sequence used as a prompt for the generation. If `input_ids` is not passed, then
+ `context_input_ids` has to be provided.
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Input IDs post-processed from the retrieved documents and the question encoder input_ids by the
+ retriever.
+ context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
+ retriever.
+
+ If the model is not initialized with a `retriever` or `input_ids` is not given, `context_input_ids` and
+ `context_attention_mask` have to be provided to the forward pass. They are returned by
+ [`~RagRetriever.__call__`].
+ doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
+ Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
+ `question_encoder_last_hidden_state`.
+
+ If the model is not initialized with a `retriever` or `input_ids` is not given, `doc_scores` has to be
+ provided to the forward pass. `doc_scores` are returned by [`~RagRetriever.__call__`].
+ do_deduplication (`bool`, *optional*):
+ Whether or not to deduplicate the generations from different context documents for a given input. Has
+ to be set to `False` if used while training with distributed backend.
+ num_return_sequences (`int`, *optional*, defaults to 1):
+ The number of independently computed returned sequences for each element in the batch. Note that this
+ is not the value we pass to the `generator`'s [`~generation.GenerationMixin.generate`] function,
+ where we set `num_return_sequences` to `num_beams`.
+ num_beams (`int`, *optional*, defaults to 1):
+ Number of beams for beam search. 1 means no beam search.
+ n_docs (`int`, *optional*, defaults to `config.n_docs`):
+ Number of documents to retrieve and/or number of documents for which to generate an answer.
+ kwargs (`Dict[str, Any]`, *optional*):
+ Additional kwargs will be passed to [`~generation.GenerationMixin.generate`].
+
+ Return:
+ `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated
+ sequences. The second dimension (sequence length) is either equal to `max_length` or shorter if all batches
+ finished early due to the `eos_token_id`.
+ """
+
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+ do_deduplication = do_deduplication if do_deduplication is not None else self.config.do_deduplication
+ num_doc_return_sequences = (
+ num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
+ )
+ num_beams = num_beams if num_beams is not None else self.config.num_beams
+
+ assert (
+ input_ids is not None or context_input_ids is not None
+ ), " At least one of input_ids or context_input_ids must be given"
+
+ if self.retriever is not None and context_input_ids is None:
+ question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]
+ context_input_ids = self.retriever(
+ input_ids,
+ question_hidden_states.cpu().detach().to(torch.float32).numpy(),
+ prefix=self.generator.config.prefix,
+ n_docs=n_docs,
+ return_tensors="pt",
+ )["context_input_ids"]
+
+ # set to correct device
+ context_input_ids = context_input_ids.to(input_ids)
+
+ hypos = []
+ model_kwargs["num_beams"] = num_beams
+ model_kwargs["num_return_sequences"] = num_beams
+ model_kwargs["attention_mask"] = None
+
+ batch_size = input_ids.shape[0] if input_ids is not None else context_input_ids.shape[0] // n_docs
+
+ for index in range(batch_size):
+ # first, generate beams from documents:
+ generator_input_ids = context_input_ids[index * n_docs : (index + 1) * n_docs] # (n_docs, max_len)
+
+ output_sequences = self.generator.generate(
+ generator_input_ids,
+ **model_kwargs,
+ ) # n_docs * n_beam, tgt_len
+ if do_deduplication:
+ # deduplicate: keep one copy of each unique generated sequence (keyed by its token list)
+ output_sequences = torch.stack(list({str(k.tolist()): k for k in output_sequences}.values()))
+
+ num_candidates = output_sequences.shape[
+ 0
+ ] # after deduplication, this number can be less than n_docs*n_beam
+
+ # then, run model forwards to get nll scores:
+ if input_ids is not None:
+ new_input_ids = input_ids[index : index + 1].repeat(num_candidates, 1)
+ outputs = self(new_input_ids, labels=output_sequences, exclude_bos_score=True)
+ else: # input_ids is None, need context_input_ids/mask and doc_scores
+ assert context_attention_mask is not None, (
+ "Make sure that `context_attention_mask` are passed, if no `input_ids` is set. Alternatively, you"
+ " can set a retriever using the `set_retriever(...)` function."
+ )
+ assert doc_scores is not None, (
+ "Make sure that `doc_scores` are passed, if no `input_ids` is set. Alternatively, you can set a"
+ " retriever using the `set_retriever(...)` function."
+ )
+
+ individual_input_ids = generator_input_ids.repeat(
+ num_candidates, 1
+ ) # (num_candidates*n_docs, max_len)
+
+ individual_attention_mask = context_attention_mask[index * n_docs : (index + 1) * n_docs]
+ individual_attention_mask = individual_attention_mask.repeat(num_candidates, 1)
+
+ individual_doc_scores = doc_scores[index : (index + 1), :] # doc_scores.shape = [batch, n_docs]
+ individual_doc_scores = individual_doc_scores.repeat(num_candidates, 1) # [num_candidates, n_docs]
+
+ outputs = self(
+ context_input_ids=individual_input_ids,
+ context_attention_mask=individual_attention_mask,
+ doc_scores=individual_doc_scores,
+ labels=output_sequences,
+ exclude_bos_score=True,
+ )
+
+ top_cand_inds = (-outputs["loss"]).topk(num_doc_return_sequences)[1]
+
+ # add hypothesis
+ hypos.append(output_sequences[top_cand_inds])
+
+ return self._cat_and_pad(hypos, pad_token_id=self.config.generator.pad_token_id)
+
+ def get_nll(
+ self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, exclude_bos_score=False, n_docs=None
+ ):
+ # shift tokens left
+ target = torch.cat(
+ [target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1
+ )
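+ # After this left shift, position t of `target` holds the token that the logits at position t
+ # are supposed to predict; the last slot is filled with the pad id and masked out in `_mask_pads`.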
+
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+
+ # bos_token_id is None for T5
+ bos_token_id = self.config.bos_token_id or self.config.generator.bos_token_id
+ use_bos = bos_token_id is not None and target[:, 0].eq(bos_token_id).all()
+
+ def _mask_pads(ll, smooth_obj):
+ pad_mask = target.eq(self.config.generator.pad_token_id)
+ if pad_mask.any():
+ ll.masked_fill_(pad_mask, 0.0)
+ smooth_obj.masked_fill_(pad_mask, 0.0)
+ return ll.squeeze(-1), smooth_obj.squeeze(-1)
+
+ # seq_logits dim = (batch*n_docs, tgt_len , #vocabs)
+ seq_logprobs = nn.functional.log_softmax(seq_logits, dim=-1).view(
+ seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1)
+ ) # batch_size x n_docs x tgt_len x #vocab_size
+ doc_logprobs = nn.functional.log_softmax(doc_scores, dim=1).unsqueeze(-1).unsqueeze(-1)
+
+ # RAG-sequence marginalization
+ first_token_scores = seq_logprobs[:, :, :1, :]
+ second_token_scores = seq_logprobs[:, :, 1:2, :]
+ remainder = seq_logprobs[:, :, 2:, :]
+ rag_logprobs = torch.cat([first_token_scores, second_token_scores + doc_logprobs, remainder], dim=2)
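+ # The document log-prior log p(z|x) must enter each per-sequence score exactly once. It is added
+ # at the second token position (rather than the first) so that the BOS score, which
+ # `exclude_bos_score` may drop, does not carry the prior. After summing over tokens and taking
+ # logsumexp over docs below, this yields the RAG-sequence marginal likelihood
+ #   log p(y|x) = logsumexp_z [ log p(z|x) + sum_t log p(y_t | x, z, y_<t) ].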
+
+ # calculate loss
+ target = target.unsqueeze(1).unsqueeze(-1).repeat(1, n_docs, 1, 1)
+ assert target.dim() == rag_logprobs.dim()
+
+ ll = rag_logprobs.gather(dim=-1, index=target)
+ smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True) # total sum of all (normalised) logits
+
+ ll, smooth_obj = _mask_pads(ll, smooth_obj)
+
+ # sum over tokens, exclude bos while scoring
+ ll = ll[:, :, 1:].sum(2) if exclude_bos_score and use_bos else ll.sum(2)
+ smooth_obj = smooth_obj.sum(2)
+ ll = ll.logsumexp(1) # logsumexp over docs
+ smooth_obj = smooth_obj.logsumexp(1)
+
+ nll_loss = -ll
+ smooth_loss = -smooth_obj
+
+ if reduce_loss:
+ nll_loss = nll_loss.sum()
+ smooth_loss = smooth_loss.sum()
+
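+ # Label smoothing sketch: `smooth_obj` sums the log-probabilities over the whole vocabulary, so
+ # `eps_i * smooth_loss` spreads an `epsilon` share of the objective uniformly over the vocab while
+ # the gold target keeps the remaining (1 - epsilon) weight.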
+ eps_i = epsilon / rag_logprobs.size(-1)
+ loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
+ return loss
+
+ @staticmethod
+ def _cat_and_pad(tensors, pad_token_id):
+ output = (
+ tensors[0].new(sum([t.shape[0] for t in tensors]), max([t.shape[1] for t in tensors])).fill_(pad_token_id)
+ )
+ ind = 0
+ for t in tensors:
+ output[ind : ind + t.shape[0], : t.shape[1]] = t
+ ind += t.shape[0]
+ return output
+
+
+@add_start_docstrings_to_model_forward(
+ """
+ A RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass.
+ """,
+ RAG_START_DOCSTRING,
+)
+class RagTokenForGeneration(RagPreTrainedModel):
+ def __init__(
+ self,
+ config: Optional[PretrainedConfig] = None,
+ question_encoder: Optional[PreTrainedModel] = None,
+ generator: Optional[PreTrainedModel] = None,
+ retriever: Optional[RagRetriever] = None,
+ **kwargs,
+ ):
+ assert config is not None or (
+ question_encoder is not None and generator is not None
+ ), "Either a configuration or an encoder and a generator has to be provided."
+
+ if config is None:
+ config = RagConfig.from_question_encoder_generator_configs(
+ question_encoder.config, generator.config, **kwargs
+ )
+
+ super().__init__(config)
+
+ # instantiate model
+ self.rag = RagModel(config=config, question_encoder=question_encoder, generator=generator, retriever=retriever)
+
+ def set_retriever(self, retriever: RagRetriever):
+ self.rag.retriever = retriever
+
+ def set_context_encoder_for_training(self, ctx_encoder: PreTrainedModel):
+ self.rag.context_encoder_training = True
+ self.rag.ctx_encoder = ctx_encoder
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ doc_scores=None,
+ n_docs=None,
+ **kwargs,
+ ):
+ # Overwritten -- `do_marginalize` is explicitly set in the output
+
+ if past_key_values is not None:
+ # if past is defined use only last decoder_input_ids
+ decoder_input_ids = decoder_input_ids[:, -1:]
+
+ return {
+ "input_ids": None,
+ "encoder_outputs": encoder_outputs,
+ "doc_scores": doc_scores,
+ "context_attention_mask": attention_mask,
+ "decoder_input_ids": decoder_input_ids,
+ "past_key_values": past_key_values,
+ "use_cache": use_cache,
+ "do_marginalize": True,
+ "n_docs": n_docs,
+ }
+
+ @property
+ def retriever(self):
+ return self.rag.retriever
+
+ @property
+ def generator(self):
+ return self.rag.generator
+
+ @property
+ def question_encoder(self):
+ return self.rag.question_encoder
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ """Reorders cache for generation. BART-inspired but we need to take care of the extra dimension for docs"""
+
+ def _reorder_stacked(hidden_states, new_order):
+ n_docs = hidden_states.shape[0] // new_order.shape[0]
+ hidden_states = hidden_states.view(-1, n_docs, *hidden_states.shape[1:])
+ hidden_states = hidden_states.index_select(0, new_order)
+ result = hidden_states.view(-1, *hidden_states.shape[2:])
+ return result
+
+ reordered_past = ()
+ for layer_past in past_key_values:
+ # get the correct batch idx from decoder layer's batch dim for cross and self-attn
+ reordered_past += (
+ tuple(_reorder_stacked(past_state, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+
+ return reordered_past
+
+ def marginalize(self, seq_logits, doc_scores, n_docs=None):
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+
+ # RAG-token marginalization
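+ # Unlike RAG-sequence, RAG-token marginalizes per decoding step:
+ #   log p(y_t | x, y_<t) = logsumexp_z [ log p(z|x) + log p(y_t | x, z, y_<t) ]
+ # so the docs dimension is summed out below for every token position.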
+ seq_logprobs = nn.functional.log_softmax(seq_logits, dim=-1).view(
+ seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1)
+ )
+ doc_logprobs = torch.log_softmax(doc_scores, dim=1)
+ log_prob_sum = seq_logprobs + doc_logprobs.unsqueeze(-1).unsqueeze(-1)
+ return torch.logsumexp(log_prob_sum, dim=1)
+
+ @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=RetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ context_input_ids: Optional[torch.LongTensor] = None,
+ context_attention_mask: Optional[torch.LongTensor] = None,
+ doc_scores: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_retrieved: Optional[bool] = None,
+ do_marginalize: Optional[bool] = None,
+ reduce_loss: Optional[bool] = None,
+ labels: Optional[torch.LongTensor] = None,
+ n_docs: Optional[int] = None,
+ **kwargs, # needs kwargs for generation
+ ) -> RetrievAugLMMarginOutput:
+ r"""
+ do_marginalize (`bool`, *optional*):
+ If `True`, the logits are marginalized over all documents by making use of
+ `torch.nn.functional.log_softmax`.
+ reduce_loss (`bool`, *optional*):
+ Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `torch.Tensor.sum`
+ operation.
+ kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
+ Legacy dictionary, which is required so that the model can use the *generate()* function.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, RagRetriever, RagTokenForGeneration
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-nq")
+ >>> retriever = RagRetriever.from_pretrained(
+ ... "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
+ ... )
+ >>> # initialize with RagRetriever to do everything in one forward call
+ >>> model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)
+
+ >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
+ >>> targets = tokenizer(text_target="In Paris, there are 10 million people.", return_tensors="pt")
+ >>> input_ids = inputs["input_ids"]
+ >>> labels = targets["input_ids"]
+ >>> outputs = model(input_ids=input_ids, labels=labels)
+
+ >>> # or use retriever separately
+ >>> model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", use_dummy_dataset=True)
+ >>> # 1. Encode
+ >>> question_hidden_states = model.question_encoder(input_ids)[0]
+ >>> # 2. Retrieve
+ >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt")
+ >>> doc_scores = torch.bmm(
+ ... question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2)
+ ... ).squeeze(1)
+ >>> # 3. Forward to generator
+ >>> outputs = model(
+ ... context_input_ids=docs_dict["context_input_ids"],
+ ... context_attention_mask=docs_dict["context_attention_mask"],
+ ... doc_scores=doc_scores,
+ ... decoder_input_ids=labels,
+ ... )
+
+ >>> # or directly generate
+ >>> generated = model.generate(
+ ... context_input_ids=docs_dict["context_input_ids"],
+ ... context_attention_mask=docs_dict["context_attention_mask"],
+ ... doc_scores=doc_scores,
+ ... )
+ >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)
+ ```"""
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+ do_marginalize = do_marginalize if do_marginalize is not None else self.config.do_marginalize
+ reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss
+
+ if labels is not None:
+ if decoder_input_ids is None:
+ decoder_input_ids = labels
+ use_cache = False
+
+ outputs = self.rag(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ encoder_outputs=encoder_outputs,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ context_input_ids=context_input_ids,
+ context_attention_mask=context_attention_mask,
+ doc_scores=doc_scores,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ output_retrieved=output_retrieved,
+ n_docs=n_docs,
+ )
+
+ loss = None
+ logits = outputs.logits
+ if labels is not None:
+ assert decoder_input_ids is not None
+ loss = self.get_nll(
+ outputs.logits,
+ outputs.doc_scores,
+ labels,
+ reduce_loss=reduce_loss,
+ epsilon=self.config.label_smoothing,
+ n_docs=n_docs,
+ )
+
+ if do_marginalize:
+ logits = self.marginalize(logits, outputs.doc_scores, n_docs)
+
+ return RetrievAugLMMarginOutput(
+ loss=loss,
+ logits=logits,
+ doc_scores=outputs.doc_scores,
+ past_key_values=outputs.past_key_values,
+ context_input_ids=outputs.context_input_ids,
+ context_attention_mask=outputs.context_attention_mask,
+ retrieved_doc_embeds=outputs.retrieved_doc_embeds,
+ retrieved_doc_ids=outputs.retrieved_doc_ids,
+ question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,
+ question_enc_hidden_states=outputs.question_enc_hidden_states,
+ question_enc_attentions=outputs.question_enc_attentions,
+ generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,
+ generator_enc_hidden_states=outputs.generator_enc_hidden_states,
+ generator_enc_attentions=outputs.generator_enc_attentions,
+ generator_dec_hidden_states=outputs.generator_dec_hidden_states,
+ generator_dec_attentions=outputs.generator_dec_attentions,
+ generator_cross_attentions=outputs.generator_cross_attentions,
+ )
+
+ @torch.no_grad()
+ def generate(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ context_input_ids: Optional[torch.LongTensor] = None,
+ context_attention_mask: Optional[torch.LongTensor] = None,
+ doc_scores: Optional[torch.FloatTensor] = None,
+ n_docs: Optional[int] = None,
+ generation_config: Optional[GenerationConfig] = None,
+ prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]] = None,
+ logits_processor: Optional[LogitsProcessorList] = LogitsProcessorList(),
+ stopping_criteria: Optional[StoppingCriteriaList] = StoppingCriteriaList(),
+ **kwargs,
+ ) -> torch.LongTensor:
+ """
+ Implements RAG token decoding.
+
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ The sequence used as a prompt for the generation. If `input_ids` is not passed, then
+ `context_input_ids` has to be provided.
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
+ retriever.
+
+ If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the
+ forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
+ context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
+ retriever.
+
+ If the model is not initialized with a `retriever`, `context_attention_mask` has to be provided to the
+ forward pass. `context_attention_mask` is returned by [`~RagRetriever.__call__`].
+ doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
+ Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
+ `question_encoder_last_hidden_state`.
+
+ If the model is not initialized with a `retriever`, `doc_scores` has to be provided to the
+ forward pass. `doc_scores` are returned by [`~RagRetriever.__call__`].
+ n_docs (`int`, *optional*, defaults to `config.n_docs`)
+ Number of documents to retrieve and/or number of documents for which to generate an answer.
+ generation_config (`~generation.GenerationConfig`, *optional*):
+ The generation configuration to be used as base parametrization for the generation call. `**kwargs`
+ passed to generate matching the attributes of `generation_config` will override them. If
+ `generation_config` is not provided, the default will be used, which has the following loading
+ priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
+ configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
+ default values, whose documentation should be checked to parameterize generation.
+ prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*):
+ If provided, this function constrains the beam search to allowed tokens only at each step. If not
+ provided, no constraint is applied. This function takes 2 arguments: `input_ids` and the batch ID
+ `batch_id`. It has to return a list with the allowed tokens for the next generation step, conditioned on
+ the previously generated tokens `input_ids` and the batch ID `batch_id`. This argument is useful for
+ constrained generation conditioned on the prefix, as described in [Autoregressive Entity
+ Retrieval](https://arxiv.org/abs/2010.00904).
+ logits_processor (`LogitsProcessorList`, *optional*):
+ Custom logits processors that complement the default logits processors built from arguments and a
+ model's config. If a logits processor is passed that is already created with the arguments or a model's
+ config, an error is thrown.
+ stopping_criteria (`StoppingCriteriaList`, *optional*):
+ Custom stopping criteria that complement the default stopping criteria built from arguments and a
+ model's config. If a stopping criterion is passed that is already created with the arguments or a
+ model's config, an error is thrown.
+ kwargs (`Dict[str, Any]`, *optional*):
+ Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
+ forwarded to the `forward` function of the model.
+
+ Return:
+ `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated
+ sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches
+ finished early due to the `eos_token_id`.
+ """
+ # Handle `generation_config` and kwargs that might update it
+ if generation_config is None:
+ generation_config = self.generation_config
+ generation_config = copy.deepcopy(generation_config)
+ model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs
+
+ kwargs_has_attention_mask = model_kwargs.get("attention_mask", None) is not None
+ self._prepare_special_tokens(generation_config, kwargs_has_attention_mask)
+
+ # set default parameters
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+
+ # retrieve docs
+ if self.retriever is not None and context_input_ids is None:
+ question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]
+ out = self.retriever(
+ input_ids,
+ question_hidden_states.cpu().detach().to(torch.float32).numpy(),
+ prefix=self.generator.config.prefix,
+ n_docs=n_docs,
+ return_tensors="pt",
+ )
+ context_input_ids, context_attention_mask, retrieved_doc_embeds = (
+ out["context_input_ids"],
+ out["context_attention_mask"],
+ out["retrieved_doc_embeds"],
+ )
+
+ # set to correct device
+ retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states)
+ context_input_ids = context_input_ids.to(input_ids)
+ context_attention_mask = context_attention_mask.to(input_ids)
+
+ # compute doc_scores
+ doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze(
+ 1
+ )
+
+ assert (context_input_ids.shape[0] % n_docs) == 0, (
+ f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is"
+ f" {context_input_ids.shape[0]}."
+ )
+
+ # batch_size
+ batch_size = context_input_ids.shape[0] // n_docs
+
+ encoder = self.rag.generator.get_encoder()
+ encoder_outputs = encoder(input_ids=context_input_ids, attention_mask=context_attention_mask, return_dict=True)
+
+ input_ids = torch.full(
+ (batch_size * generation_config.num_beams, 1),
+ generation_config.decoder_start_token_id,
+ dtype=torch.long,
+ device=next(self.parameters()).device,
+ )
+ input_ids_seq_length = input_ids.shape[-1]
+ last_hidden_state = encoder_outputs["last_hidden_state"]
+
+ def extend_enc_output(tensor, num_beams=None):
+ # split into `batch_size`, `num_beams`, `num_docs`
+ tensor = tensor[None, None, :].reshape((batch_size, 1, n_docs) + tensor.shape[1:])
+ # repeat same last hidden states over `num_beams` dimension
+ tensor = tensor.expand((batch_size, num_beams, n_docs) + tensor.shape[3:])
+ # merge `batch_size`, `num_beams`, `num_docs` dims again
+ return tensor.reshape((batch_size * num_beams * n_docs,) + tensor.shape[3:])
+
+ # correctly extend last_hidden_state and attention mask
+ context_attention_mask = extend_enc_output(context_attention_mask, num_beams=generation_config.num_beams)
+ encoder_outputs["last_hidden_state"] = extend_enc_output(
+ last_hidden_state, num_beams=generation_config.num_beams
+ )
+
+ doc_scores = doc_scores.repeat_interleave(generation_config.num_beams, dim=0)
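+ # Example: with batch_size=2, num_beams=3 and n_docs=5, the 10 (example x doc) encoder rows are
+ # expanded to 30 rows ordered as (example, beam, doc), and doc_scores goes from (2, 5) to (6, 5),
+ # so every beam hypothesis of an example is paired with the same n_docs retrieved documents.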
+
+ # define start_len & additional parameters
+ model_kwargs["doc_scores"] = doc_scores
+ model_kwargs["encoder_outputs"] = encoder_outputs
+ model_kwargs["attention_mask"] = context_attention_mask
+ model_kwargs["n_docs"] = n_docs
+
+ pre_processor = self._get_logits_processor(
+ generation_config=generation_config,
+ input_ids_seq_length=input_ids_seq_length,
+ encoder_input_ids=context_input_ids,
+ prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
+ logits_processor=logits_processor,
+ device=input_ids.device,
+ )
+
+ prepared_stopping_criteria = self._get_stopping_criteria(
+ generation_config=generation_config, stopping_criteria=stopping_criteria
+ )
+
+ if generation_config.num_beams == 1:
+ if generation_config.num_return_sequences > 1:
+ raise ValueError(
+ f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing"
+ " greedy search."
+ )
+ return self._sample(
+ input_ids,
+ logits_processor=pre_processor,
+ stopping_criteria=prepared_stopping_criteria,
+ generation_config=generation_config,
+ synced_gpus=False,
+ streamer=None,
+ **model_kwargs,
+ )
+ elif generation_config.num_beams > 1:
+ if generation_config.num_return_sequences > generation_config.num_beams:
+ raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.")
+ beam_scorer = BeamSearchScorer(
+ batch_size=batch_size,
+ num_beams=generation_config.num_beams,
+ device=self.device,
+ length_penalty=generation_config.length_penalty,
+ do_early_stopping=generation_config.early_stopping,
+ num_beam_hyps_to_keep=generation_config.num_return_sequences,
+ max_length=generation_config.max_length,
+ )
+ return self._beam_search(
+ input_ids,
+ beam_scorer,
+ logits_processor=pre_processor,
+ stopping_criteria=prepared_stopping_criteria,
+ generation_config=generation_config,
+ synced_gpus=False,
+ **model_kwargs,
+ )
+ else:
+ raise ValueError(
+ f"`num_beams` has to be an integer strictly superior to 0 (≥ 1), but is {generation_config.num_beams}"
+ )
+
+ def get_input_embeddings(self):
+ return self.rag.generator.get_input_embeddings()
+
+ def get_output_embeddings(self):
+ return self.rag.generator.get_output_embeddings()
+
+ def set_output_embeddings(self, new_embeddings):
+ return self.rag.generator.set_output_embeddings(new_embeddings)
+
+ def shift_tokens_right(self, input_ids, start_token_id=None):
+ """Shift input ids one token to the right, and pad with start_token_id"""
+ if start_token_id is None:
+ start_token_id = self.config.decoder_start_token_id
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
+ shifted_input_ids[:, 0] = start_token_id
+ return shifted_input_ids
+
+ def get_nll(self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, n_docs=None):
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+ # shift tokens left
+ target = torch.cat(
+ [target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1
+ )
+
+ def _mask_pads(ll, smooth_obj):
+ pad_mask = target.eq(self.config.generator.pad_token_id)
+ if pad_mask.any():
+ ll.masked_fill_(pad_mask, 0.0)
+ smooth_obj.masked_fill_(pad_mask, 0.0)
+ return ll.squeeze(-1), smooth_obj.squeeze(-1)
+
+ rag_logprobs = self.marginalize(seq_logits, doc_scores, n_docs)
+
+ target = target.unsqueeze(-1)
+ assert target.dim() == rag_logprobs.dim()
+
+ ll = rag_logprobs.gather(dim=-1, index=target)
+ smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True) # total sum of all (normalised) logits
+ ll, smooth_obj = _mask_pads(ll, smooth_obj)
+ ll = ll.sum(1) # sum over tokens
+ smooth_obj = smooth_obj.sum(1)
+
+ nll_loss = -ll
+ smooth_loss = -smooth_obj
+
+ if reduce_loss:
+ nll_loss = nll_loss.sum()
+ smooth_loss = smooth_loss.sum()
+
+ eps_i = epsilon / rag_logprobs.size(-1)
+ loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
+ return loss
+
+
+__all__ = ["RagModel", "RagPreTrainedModel", "RagSequenceForGeneration", "RagTokenForGeneration"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/rag/modeling_tf_rag.py b/janus/lib/python3.10/site-packages/transformers/models/rag/modeling_tf_rag.py
new file mode 100644
index 0000000000000000000000000000000000000000..6714ac61a3bd32e4797a80088b64b439e694d715
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/rag/modeling_tf_rag.py
@@ -0,0 +1,1773 @@
+# coding=utf-8
+# Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""TFRAG model implementation."""
+
+from __future__ import annotations
+
+import copy
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...configuration_utils import PretrainedConfig
+from ...generation import TFLogitsProcessorList
+from ...modeling_tf_utils import (
+ TFCausalLanguageModelingLoss,
+ TFModelInputType,
+ TFPreTrainedModel,
+ keras,
+ shape_list,
+ unpack_inputs,
+)
+from ...utils import ModelOutput, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_rag import RagConfig
+from .retrieval_rag import RagRetriever
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "RagConfig"
+
+
+@dataclass
+class TFRetrievAugLMMarginOutput(ModelOutput):
+ """
+ Base class for retriever augmented marginalized models outputs.
+
+ Args:
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss.
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head. The score is possibly marginalized over all documents for
+ each vocabulary token.
+ past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,
+ sequence_length, embed_size_per_head)`).
+
+ Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used
+ (see `past_key_values` input) to speed up sequential decoding.
+ doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`):
+ Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
+ `question_encoder_last_hidden_state`.
+ retrieved_doc_embeds (`tf.Tensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*):
+ Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute
+ the `doc_scores`.
+ retrieved_doc_ids (`tf.Tensor` (int32) of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*):
+ The indexes of the embedded documents retrieved by the retriever.
+ context_input_ids (`tf.Tensor`(int32) of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.
+ context_attention_mask (`tf.Tensor` (int32) of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
+ retriever.
+ question_encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden states at the output of the last layer of the question encoder (the pooled question
+ representation of the model).
+ question_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.
+ question_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the question encoder, after the attention softmax, used to compute the weighted
+ average in the self-attention heads.
+ generator_enc_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the generator encoder of the model.
+ generator_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.
+ generator_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted
+ average in the self-attention heads.
+ generator_dec_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.
+ generator_dec_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted
+ average in the self-attention heads.
+ """
+
+ loss: tf.Tensor | None = None
+ logits: tf.Tensor = None
+ past_key_values: List[tf.Tensor] | None = None
+ doc_scores: tf.Tensor | None = None
+ retrieved_doc_embeds: tf.Tensor | None = None
+ retrieved_doc_ids: tf.Tensor | None = None
+ context_input_ids: tf.Tensor | None = None
+ context_attention_mask: tf.Tensor | None = None
+ question_encoder_last_hidden_state: tf.Tensor | None = None
+ question_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None
+ question_enc_attentions: Tuple[tf.Tensor, ...] | None = None
+ generator_enc_last_hidden_state: tf.Tensor | None = None
+ generator_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None
+ generator_enc_attentions: Tuple[tf.Tensor, ...] | None = None
+ generator_dec_hidden_states: Tuple[tf.Tensor, ...] | None = None
+ generator_dec_attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+@dataclass
+class TFRetrievAugLMOutput(ModelOutput):
+ """
+ Args:
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head. The score is possibly marginalized over all documents for
+ each vocabulary token.
+ past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,
+ sequence_length, embed_size_per_head)`).
+
+ Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used
+ (see `past_key_values` input) to speed up sequential decoding.
+ doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`):
+ Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
+ `question_encoder_last_hidden_state`.
+ retrieved_doc_embeds (`tf.Tensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*):
+ Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute
+ the `doc_scores`.
+ retrieved_doc_ids (`tf.Tensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*):
+ The indexes of the embedded documents retrieved by the retriever.
+ context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.
+ context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
+ retriever.
+ question_encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden states at the output of the last layer of the question encoder (the pooled question
+ representation of the model).
+ question_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.
+ question_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the question encoder, after the attention softmax, used to compute the weighted
+ average in the self-attention heads.
+ generator_enc_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the generator encoder of the model.
+ generator_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.
+ generator_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted
+ average in the self-attention heads.
+ generator_dec_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.
+ generator_dec_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted
+ average in the self-attention heads.
+ """
+
+ logits: tf.Tensor = None
+ past_key_values: List[tf.Tensor] | None = None
+ doc_scores: tf.Tensor | None = None
+ retrieved_doc_embeds: tf.Tensor | None = None
+ retrieved_doc_ids: tf.Tensor | None = None
+ context_input_ids: tf.Tensor | None = None
+ context_attention_mask: tf.Tensor | None = None
+ question_encoder_last_hidden_state: tf.Tensor | None = None
+ question_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None
+ question_enc_attentions: Tuple[tf.Tensor, ...] | None = None
+ generator_enc_last_hidden_state: tf.Tensor | None = None
+ generator_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None
+ generator_enc_attentions: Tuple[tf.Tensor, ...] | None = None
+ generator_dec_hidden_states: Tuple[tf.Tensor, ...] | None = None
+ generator_dec_attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+class TFRagPreTrainedModel(TFPreTrainedModel):
+ r"""
+ RAG models were released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP
+ Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al.
+
+ RAG is a retrieval-augmented model that encapsulates three components: a question encoder, a dataset retriever and a
+ generator. The encoder and generator are trainable, while the retriever is just an indexed dataset.
+
+ """
+
+ config_class = RagConfig
+ base_model_prefix = "rag"
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
+
+ @classmethod
+ def from_pretrained_question_encoder_generator(
+ cls,
+ question_encoder_pretrained_model_name_or_path: str = None,
+ generator_pretrained_model_name_or_path: str = None,
+ retriever: RagRetriever = None,
+ *model_args,
+ **kwargs,
+ ) -> TFPreTrainedModel:
+ r"""
+ Instantiates a question encoder and a generator from one or two base classes of the library from pretrained
+ model checkpoints.
+
+ Params:
+ question_encoder_pretrained_model_name_or_path (`str`, *optional*):
+ Information necessary to initiate the question encoder. Can be either:
+
+ - A string with the *shortcut name* of a pretrained model to load from cache or download, e.g.,
+ `google-bert/bert-base-uncased`.
+ - A string with the *identifier name* of a pretrained model that was user-uploaded to our S3, e.g.,
+ `dbmdz/bert-base-german-cased`.
+ - A path to a *directory* containing model weights saved using
+ [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+ - A path or url to a *pytorch index checkpoint file* (e.g., `./pt_model/`). In this case,
+ `question_encoder_from_pt` should be set to `True`.
+
+ generator_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
+ Information necessary to initiate the generator. Can be either:
+
+ - A string with the *shortcut name* of a pretrained model to load from cache or download, e.g.,
+ `google-t5/t5-small`.
+ - A string with the *identifier name* of a pretrained model that was user-uploaded to our S3, e.g.,
+ `facebook/bart-base`.
+ - A path to a *directory* containing model weights saved using
+ [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+ - A path or url to a *pytorch checkpoint file* (e.g., `./pt_model/`). In this case,
+ `generator_from_pt` should be set to `True`.
+
+ model_args (remaining positional arguments, *optional*):
+ All remaining positional arguments will be passed to the underlying model's `__init__` method.
+ retriever ([`RagRetriever`], *optional*):
+ The retriever to use.
+ kwargs (remaining dictionary of keyword arguments, *optional*):
+ Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g.,
+ `output_attentions=True`).
+
+ - To update the question_encoder configuration, use the prefix *question_encoder_* for each
+ configuration parameter.
+ - To update the generator configuration, use the prefix *generator_* for each configuration parameter.
+ - To update the parent model configuration, do not use a prefix for each configuration parameter.
+
+ Behaves differently depending on whether a `config` is provided or automatically loaded.
+
+ Example:
+
+ ```python
+ >>> from transformers import RagRetriever, TFRagModel
+
+ >>> # initialize a RAG from two pretrained models.
+ >>> model = TFRagModel.from_pretrained_question_encoder_generator(
+ ... "facebook/dpr-question_encoder-single-nq-base", "google-t5/t5-small"
+ ... )
+ >>> # alternatively, initializing from pytorch pretrained models can also be done
+ >>> model = TFRagModel.from_pretrained_question_encoder_generator(
+ ... "facebook/dpr-question_encoder-single-nq-base",
+ ... "facebook/bart-base",
+ ... generator_from_pt=True,
+ ... question_encoder_from_pt=True,
+ ... )
+
+ >>> # saving model after fine-tuning
+ >>> model.save_pretrained("./rag")
+
+ >>> # load retriever
+ >>> retriever = RagRetriever.from_pretrained(
+ ... "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True
+ ... )
+ >>> # load fine-tuned model with retriever
+ >>> model = TFRagModel.from_pretrained("./rag", retriever=retriever)
+ ```"""
+
+ kwargs_question_encoder = {
+ argument[len("question_encoder_") :]: value
+ for argument, value in kwargs.items()
+ if argument.startswith("question_encoder_")
+ }
+
+ kwargs_generator = {
+ argument[len("generator_") :]: value
+ for argument, value in kwargs.items()
+ if argument.startswith("generator_")
+ }
+
+ # remove question_encoder, generator kwargs from kwargs
+ for key in kwargs_question_encoder.keys():
+ del kwargs["question_encoder_" + key]
+ for key in kwargs_generator.keys():
+ del kwargs["generator_" + key]
+
+ # Load and initialize the question_encoder and generator
+ # The distinction between question_encoder and generator at the model level is made
+ # by the value of the flag `is_generator` that we need to set correctly.
+ question_encoder = kwargs_question_encoder.pop("model", None)
+ if question_encoder is None:
+ assert question_encoder_pretrained_model_name_or_path is not None, (
+ "If `model` is not defined as an argument, a `question_encoder_pretrained_model_name_or_path` has to"
+ " be defined"
+ )
+
+ from ..auto.modeling_tf_auto import TFAutoModel
+
+ if "config" not in kwargs_question_encoder:
+ from ..auto.configuration_auto import AutoConfig
+
+ question_encoder_config = AutoConfig.from_pretrained(question_encoder_pretrained_model_name_or_path)
+ kwargs_question_encoder["config"] = question_encoder_config
+
+ question_encoder = TFAutoModel.from_pretrained(
+ question_encoder_pretrained_model_name_or_path,
+ name="question_encoder",
+ load_weight_prefix=cls.load_weight_prefix,
+ *model_args,
+ **kwargs_question_encoder,
+ )
+
+ generator = kwargs_generator.pop("generator", None)
+ if generator is None:
+ assert generator_pretrained_model_name_or_path is not None, (
+ "If `generator_model` is not defined as an argument, a `generator_pretrained_model_name_or_path` has"
+ " to be defined"
+ )
+
+ from ..auto.modeling_tf_auto import TFAutoModelForSeq2SeqLM
+
+ if "config" not in kwargs_generator:
+ from ..auto.configuration_auto import AutoConfig
+
+ generator_config = AutoConfig.from_pretrained(generator_pretrained_model_name_or_path)
+ kwargs_generator["config"] = generator_config
+
+ generator = TFAutoModelForSeq2SeqLM.from_pretrained(
+ generator_pretrained_model_name_or_path,
+ name="generator",
+ load_weight_prefix=cls.load_weight_prefix,
+ **kwargs_generator,
+ )
+
+ # instantiate config with corresponding kwargs
+ config = kwargs.get("config", None)
+ if config is None:
+ config = RagConfig.from_question_encoder_generator_configs(
+ question_encoder.config, generator.config, **kwargs
+ )
+
+ return cls(question_encoder=question_encoder, generator=generator, config=config, retriever=retriever)
+
+
+RAG_START_DOCSTRING = r"""
+
+ RAG is a sequence-to-sequence model which encapsulates two core components: a question encoder and a generator.
+ During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract
+ relevant context documents. The documents are then prepended to the input. Such contextualized inputs are
+ passed to the generator.
+
+ The question encoder can be any *autoencoding* model, preferably [`TFDPRQuestionEncoder`], and the generator can be
+ any *seq2seq* model, preferably [`TFBartForConditionalGeneration`].
+
+ The model can be initialized with a [`RagRetriever`] for end-to-end generation or used in combination with the
+ outputs of a retriever in multiple steps---see examples for more details. The model is compatible with any
+ *autoencoding* model as the `question_encoder` and any *seq2seq* model with a language model head as the `generator`.
+ It has been tested with [`TFDPRQuestionEncoder`] as the `question_encoder` and [`TFBartForConditionalGeneration`]
+ as the `generator`.
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a Tensorflow [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)
+ subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to
+ general usage and behavior.
+
+ The model is in a developing state, as it currently fully supports eager mode only and may not be exported in
+ SavedModel format.
+
+ Args:
+ config ([`RagConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
+ question_encoder ([`TFPreTrainedModel`]):
+ An encoder model compatible with the faiss index encapsulated by the `retriever`.
+ generator ([`TFPreTrainedModel`]):
+ A seq2seq model used as the generator in the RAG architecture.
+ retriever ([`RagRetriever`]):
+ A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.
+"""
+
+
+RAG_FORWARD_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. [`RagConfig`], used to initialize the model, specifies
+ which generator to use; it also specifies a compatible generator tokenizer. Use that tokenizer class to
+ obtain the indices.
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_outputs (`tuple(tuple(tf.Tensor))`, *optional*):
+ Tuple consists of (`generator_enc_last_hidden_state`, *optional*: `generator_enc_hidden_states`,
+ *optional*: `generator_enc_attentions`). `generator_enc_last_hidden_state` of shape `(batch_size, n_docs *
+ sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the
+ generator's encoder.
+
+ Used by the [`TFRagModel`] model during decoding.
+ decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Provide for generation tasks. `None` by default, construct as per instructions for the generator model
+ you're using with your RAG instance.
+ decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ past_key_values (`tuple(tuple(tf.Tensor))`):
+ Tuple consists of two elements: `encoder_outputs` of the RAG model (see `encoder_outputs`) and
+ `past_key_values` of the underlying generator. Can be used to speed up decoding. `past_key_values` are used
+ in the [`TFRagTokenForGeneration`] model during decoding.
+ doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`):
+ Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
+ `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever`, `doc_scores`
+ has to be provided to the forward pass. `doc_scores` can be computed via
+ `question_encoder_last_hidden_state` and `retrieved_doc_embeds`, see examples for more information.
+ context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
+ retriever.
+
+ If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the
+ forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
+ context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
+ retriever.
+
+ If the model is not initialized with a `retriever`, `context_attention_mask` has to be provided to the
+ forward pass. `context_attention_mask` is returned by [`~RagRetriever.__call__`].
+ use_cache (`bool`, *optional*, defaults to `True`):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ output_retrieved (`bool`, *optional*):
+ Whether or not to return the `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
+ `context_attention_mask`. See returned tensors for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`TFRetrievAugLMOutput`] instead of a plain tuple.
+ n_docs (`int`, *optional*, defaults to `config.n_docs`):
+ Number of documents to retrieve and/or number of documents for which to generate an answer.
+"""
+
+
+@add_start_docstrings_to_model_forward(RAG_START_DOCSTRING)
+class TFRagModel(TFRagPreTrainedModel):
+ load_weight_prefix = "tf_rag_model_1"
+
+ def __init__(
+ self,
+ config: Optional[PretrainedConfig] = None,
+ question_encoder: Optional[TFPreTrainedModel] = None,
+ generator: Optional[TFPreTrainedModel] = None,
+ retriever: Optional[RagRetriever] = None,
+ load_weight_prefix: Optional[str] = None,
+ **kwargs,
+ ):
+ assert config is not None or (
+ question_encoder is not None and generator is not None
+ ), "Either a configuration or an question_encoder and a generator has to be provided."
+
+ if config is None:
+ config = RagConfig.from_question_encoder_generator_configs(
+ question_encoder.config, generator.config, **kwargs
+ )
+ else:
+ assert isinstance(config, self.config_class), f"config: {config} has to be of type {self.config_class}"
+ super().__init__(config, **kwargs)
+
+ if question_encoder is None:
+ from ..auto.modeling_tf_auto import TFAutoModel
+
+ question_encoder = TFAutoModel.from_config(config.question_encoder, name="question_encoder")
+
+ if generator is None:
+ from ..auto.modeling_tf_auto import TFAutoModelForSeq2SeqLM
+
+ load_weight_prefix = load_weight_prefix if load_weight_prefix is not None else self.load_weight_prefix
+ generator = TFAutoModelForSeq2SeqLM.from_config(
+ config.generator, name="generator", load_weight_prefix=load_weight_prefix + "/generator"
+ )
+
+ self.retriever = retriever
+ if self.retriever is not None:
+ assert isinstance(
+ retriever, RagRetriever
+ ), f"`self.retriever` is of type {type(self.retriever)}, but should be of type `RagRetriever`"
+ self.retriever = retriever
+
+ self.question_encoder = question_encoder
+ self.generator = generator
+
+ def set_retriever(self, retriever: RagRetriever):
+ self.retriever = retriever
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFRetrievAugLMOutput, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ encoder_outputs: np.ndarray | tf.Tensor | None = None,
+ decoder_input_ids: np.ndarray | tf.Tensor | None = None,
+ decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
+ past_key_values: Tuple[Tuple[Union[np.ndarray, tf.Tensor]]] | None = None,
+ doc_scores: np.ndarray | tf.Tensor | None = None,
+ context_input_ids: np.ndarray | tf.Tensor | None = None,
+ context_attention_mask: np.ndarray | tf.Tensor | None = None,
+ use_cache: bool | None = None,
+ output_attentions: bool | None = None,
+ output_hidden_states: bool | None = None,
+ output_retrieved: bool | None = None,
+ n_docs: int | None = None,
+ return_dict: bool | None = None,
+ training: bool = False,
+ **kwargs,
+ ) -> TFRetrievAugLMOutput:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, RagRetriever, TFRagModel
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-base")
+ >>> retriever = RagRetriever.from_pretrained(
+ ... "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True
+ ... )
+ >>> # initialize with RagRetriever to do everything in one forward call
+ >>> model = TFRagModel.from_pretrained("facebook/rag-token-base", retriever=retriever, from_pt=True)
+
+ >>> input_dict = tokenizer.prepare_seq2seq_batch(
+ ... "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf"
+ ... )
+ >>> input_ids = input_dict["input_ids"]
+ >>> outputs = model(input_ids)
+ ```"""
+ assert (
+ "decoder_cached_states" not in kwargs
+ ), "Please use past_key_values to cache intermediate outputs" # from modeling_tf_bart.py
+
+ # aliasing to minimize code changing
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+
+ # whether retriever has to be used
+ has_to_retrieve = (
+ self.retriever is not None
+ and (context_input_ids is None or context_attention_mask is None or doc_scores is None)
+ and encoder_outputs is None
+ )
+
+ # encoder_outputs are pre-computed during RAG-token generation
+ if encoder_outputs is None:
+ if has_to_retrieve:
+ question_enc_outputs = self.question_encoder(
+ input_ids, attention_mask=attention_mask, return_dict=True, training=training
+ )
+ # see https://github.com/huggingface/transformers/blob/main/src/transformers/models/dpr/modeling_tf_dpr.py#L91
+ question_encoder_last_hidden_state = question_enc_outputs[
+ 0
+ ] # hidden states of question encoder => pooler_output
+
+ retriever_outputs = self.retriever(
+ input_ids,
+ question_encoder_last_hidden_state.numpy(),
+ prefix=self.generator.config.prefix,
+ n_docs=n_docs,
+ return_tensors="tf",
+ )
+ context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_ids = (
+ retriever_outputs["context_input_ids"],
+ retriever_outputs["context_attention_mask"],
+ retriever_outputs["retrieved_doc_embeds"],
+ retriever_outputs["doc_ids"],
+ )
+
+ context_input_ids = tf.cast(context_input_ids, tf.int32)
+ context_attention_mask = tf.cast(context_attention_mask, tf.int32)
+ retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32)
+ retrieved_doc_ids = tf.cast(retrieved_doc_ids, tf.int32)
+
+ # compute doc_scores
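+ # question_encoder_last_hidden_state: (batch_size, hidden_size); retrieved_doc_embeds: (batch_size, n_docs, hidden_size)
+ # the batched matmul below gives (batch_size, 1, n_docs); squeezing axis 1 yields doc_scores of shape (batch_size, n_docs)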
+ doc_scores = tf.squeeze(
+ tf.matmul(
+ tf.expand_dims(question_encoder_last_hidden_state, axis=1),
+ retrieved_doc_embeds,
+ transpose_b=True,
+ ),
+ axis=1,
+ )
+
+ else:
+ assert context_input_ids is not None, (
+ "Make sure that `context_input_ids` are passed, if no `retriever` is set. Alternatively, you can"
+ " set a retriever using the `set_retriever(...)` function."
+ )
+ assert context_attention_mask is not None, (
+ "Make sure that `context_attention_mask` are passed, if no `retriever` is set. Alternatively, you"
+ " can set a retriever using the `set_retriever(...)` function."
+ )
+ assert doc_scores is not None, (
+ "Make sure that `doc_scores` are passed, if no `retriever` is set. Alternatively, you can set a"
+ " retriever using the `set_retriever(...)` function."
+ )
+
+ assert (
+ doc_scores is not None
+ ), "Make sure that `doc_scores` are passed when passing `encoder_outputs` to the forward function."
+
+ assert (doc_scores.shape[1] % n_docs) == 0, (
+ f"The second dimension of `doc_scores` should be a multiple of `n_docs`={n_docs}, but is"
+ f" {doc_scores.shape[1]}."
+ )
+
+ # Decoder inputs do not include the context documents, so repeat them for each retrieved document
+ if decoder_input_ids is not None:
+ decoder_input_ids = tf.repeat(decoder_input_ids, n_docs, axis=0)
+
+ if decoder_attention_mask is not None:
+ decoder_attention_mask = tf.repeat(decoder_attention_mask, n_docs, axis=0)
+
+ gen_outputs = self.generator(
+ context_input_ids,
+ attention_mask=context_attention_mask,
+ encoder_outputs=encoder_outputs,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ return_dict=True,
+ training=training,
+ )
+
+ if not has_to_retrieve:
+ question_encoder_last_hidden_state = None
+ question_enc_hidden_states = None
+ question_enc_attentions = None
+ retrieved_doc_embeds = None
+ retrieved_doc_ids = None
+ else:
+ question_enc_hidden_states = question_enc_outputs.hidden_states
+ question_enc_attentions = question_enc_outputs.attentions
+
+ if not has_to_retrieve or not output_retrieved:
+ # don't output retrieved docs
+ context_input_ids = None
+ context_attention_mask = None
+ retrieved_doc_embeds = None
+ retrieved_doc_ids = None
+
+ return TFRetrievAugLMOutput(
+ logits=gen_outputs.logits,
+ doc_scores=doc_scores,
+ past_key_values=gen_outputs.past_key_values,
+ context_input_ids=context_input_ids,
+ context_attention_mask=context_attention_mask,
+ retrieved_doc_embeds=retrieved_doc_embeds,
+ retrieved_doc_ids=retrieved_doc_ids,
+ question_encoder_last_hidden_state=question_encoder_last_hidden_state,
+ question_enc_hidden_states=question_enc_hidden_states,
+ question_enc_attentions=question_enc_attentions,
+ generator_enc_last_hidden_state=gen_outputs.encoder_last_hidden_state,
+ generator_enc_hidden_states=gen_outputs.encoder_hidden_states,
+ generator_enc_attentions=gen_outputs.encoder_attentions,
+ generator_dec_hidden_states=gen_outputs.decoder_hidden_states,
+ generator_dec_attentions=gen_outputs.decoder_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ with tf.name_scope(self.generator.name):
+ self.generator.build(None)
+ with tf.name_scope(self.question_encoder.name):
+ self.question_encoder.build(None)
+
+
+@add_start_docstrings_to_model_forward(
+ """
+ A TF RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass.
+ """,
+ RAG_START_DOCSTRING,
+)
+class TFRagTokenForGeneration(TFRagPreTrainedModel, TFCausalLanguageModelingLoss):
+ load_weight_prefix = "tf_rag_token_for_generation_1/rag"
+
+ def __init__(
+ self,
+ config: Optional[PretrainedConfig] = None,
+ question_encoder: Optional[TFPreTrainedModel] = None,
+ generator: Optional[TFPreTrainedModel] = None,
+ retriever: Optional[RagRetriever] = None,
+ **kwargs,
+ ):
+ assert config is not None or (
+ question_encoder is not None and generator is not None
+ ), "Either a configuration or an encoder and a generator has to be provided."
+
+ if config is None:
+ config = RagConfig.from_question_encoder_generator_configs(
+ question_encoder.config, generator.config, **kwargs
+ )
+
+ super().__init__(config)
+
+ # instantiate model
+ self.rag = TFRagModel(
+ config=config,
+ question_encoder=question_encoder,
+ generator=generator,
+ retriever=retriever,
+ load_weight_prefix=self.load_weight_prefix,
+ name="rag",
+ )
+
+ def set_retriever(self, retriever: RagRetriever):
+ self.rag.retriever = retriever
+
+ # Adapted from https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_tf_bart.py
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ doc_scores=None,
+ n_docs=None,
+ **kwargs,
+ ):
+ if past_key_values is not None:
+ # if past is defined use only last decoder_input_ids
+ decoder_input_ids = decoder_input_ids[:, -1:]
+
+ return {
+ "input_ids": None,
+ "encoder_outputs": encoder_outputs,
+ "doc_scores": doc_scores,
+ "context_attention_mask": attention_mask,
+ "decoder_input_ids": decoder_input_ids,
+ "past_key_values": past_key_values,
+ "use_cache": use_cache,
+ "do_marginalize": True,
+ "n_docs": n_docs,
+ }
+
+ @property
+ def retriever(self):
+ return self.rag.retriever
+
+ @property
+ def generator(self):
+ return self.rag.generator
+
+ @property
+ def question_encoder(self):
+ return self.rag.question_encoder
+
+ @staticmethod
+ def _gather_beams(nested, beam_indices, batch_axis=0):
+ """
+ RAG-specific `_gather_beams`: gathers the beam slices indexed by `beam_indices` into a new beam array. If the
+ nested tensor has a shape mismatch with the beam indices, it is the cache; in that case, the extra `n_docs`
+ dimension is isolated and handled separately.
+ """
+
+ def gather_fn(tensor):
+ is_rag_cache = tensor.shape[0] != beam_indices.shape[0]
+ if is_rag_cache:
+ n_docs = tensor.shape[0] // beam_indices.shape[0]
+ batch_size = beam_indices.shape[0]
+ # reshapes into (batch size, num beams, n_docs, ...), the cache format expected by RAG
+ tensor = tf.reshape(tensor, (batch_size, -1, n_docs, *tensor.shape[2:]))
+
+ gathered_tensor = tf.gather(params=tensor, indices=beam_indices, axis=1, batch_dims=1)
+
+ if is_rag_cache:
+ # reshapes back into the shape expected by beam search
+ gathered_tensor = tf.reshape(gathered_tensor, (batch_size * n_docs, -1, *gathered_tensor.shape[3:]))
+
+ return gathered_tensor
+
+ return tf.nest.map_structure(gather_fn, nested)
+
+ def marginalize(self, seq_logits, doc_scores, n_docs=None):
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+
+ # RAG-token marginalization
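+ # seq_logits: (batch_size * n_docs, tgt_len, vocab_size); doc_scores: (batch_size, n_docs)
+ # token log-probs are reshaped to (batch_size, n_docs, tgt_len, vocab_size), document log-probs are broadcast
+ # over the token and vocab axes, and the result is log-sum-exp'd over the n_docs axis at every decoding step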
+ seq_logprobs = tf.nn.log_softmax(seq_logits, axis=-1)
+ seq_logprobs = tf.reshape(seq_logprobs, [seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.shape[-1]])
+ doc_logprobs = tf.nn.log_softmax(doc_scores, axis=1)
+ doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1)
+ doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) # twice
+ log_prob_sum = seq_logprobs + doc_logprobs
+ return tf.reduce_logsumexp(log_prob_sum, axis=1)
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFRetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ decoder_input_ids: np.ndarray | tf.Tensor | None = None,
+ decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
+ encoder_outputs: np.ndarray | tf.Tensor | None = None,
+ past_key_values: Tuple[Tuple[Union[np.ndarray, tf.Tensor]]] | None = None,
+ doc_scores: np.ndarray | tf.Tensor | None = None,
+ context_input_ids: np.ndarray | tf.Tensor | None = None,
+ context_attention_mask: np.ndarray | tf.Tensor | None = None,
+ use_cache: bool | None = None,
+ output_attentions: bool | None = None,
+ output_hidden_states: bool | None = None,
+ output_retrieved: bool | None = None,
+ n_docs: int | None = None,
+ do_marginalize: bool | None = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ reduce_loss: bool | None = None,
+ return_dict: bool | None = None,
+ training: bool = False,
+ **kwargs, # needs kwargs for generation
+ ) -> TFRetrievAugLMMarginOutput:
+ r"""
+ do_marginalize (`bool`, *optional*):
+ If `True`, the logits are marginalized over all documents by making use of
+ `tf.nn.log_softmax`.
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the cross entropy classification loss according to the Rag-Token model formulation. See
+ https://arxiv.org/pdf/2005.11401.pdf Section 2.1 for details about the Rag-Token formulation. Indices should be
+ in `[0, ..., config.vocab_size - 1]`.
+ reduce_loss (`bool`, *optional*):
+ Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `tf.Tensor.sum`
+ operation.
+ kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
+ Legacy dictionary, which is required so that the model can use the *generate()* function.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> import tensorflow as tf
+ >>> from transformers import AutoTokenizer, RagRetriever, TFRagTokenForGeneration
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-nq")
+ >>> retriever = RagRetriever.from_pretrained(
+ ... "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
+ ... )
+ >>> # initialize with RagRetriever to do everything in one forward call
+ >>> model = TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever, from_pt=True)
+
+ >>> input_dict = tokenizer.prepare_seq2seq_batch(
+ ... "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf"
+ ... )
+ >>> outputs = model(input_dict, output_retrieved=True)
+
+ >>> # or use retriever separately
+ >>> # 1. Encode
+ >>> input_ids = input_dict["input_ids"]
+ >>> question_hidden_states = model.question_encoder(input_ids)[0]
+ >>> # 2. Retrieve
+ >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf")
+ >>> doc_scores = tf.squeeze(
+ ... tf.matmul(
+ ... tf.expand_dims(question_hidden_states, axis=1), docs_dict["retrieved_doc_embeds"], transpose_b=True
+ ... ),
+ ... axis=1,
+ ... )
+ >>> # 3. Forward to generator
+ >>> outputs = model(
+ ... inputs=None,
+ ... context_input_ids=docs_dict["context_input_ids"],
+ ... context_attention_mask=docs_dict["context_attention_mask"],
+ ... doc_scores=doc_scores,
+ ... decoder_input_ids=input_dict["labels"],
+ ... )
+
+ >>> # or directly generate
+ >>> generated = model.generate(
+ ... context_input_ids=docs_dict["context_input_ids"],
+ ... context_attention_mask=docs_dict["context_attention_mask"],
+ ... doc_scores=doc_scores,
+ ... )
+ >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)
+ ```"""
+
+ assert (
+ "decoder_cached_states" not in kwargs
+ ), "Please use past_key_values to cache intermediate outputs" # from modeling_tf_bart.py
+
+ do_marginalize = do_marginalize if do_marginalize is not None else self.config.do_marginalize
+ reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss
+
+ if labels is not None:
+ if decoder_input_ids is None:
+ decoder_input_ids = labels
+ use_cache = False
+
+ outputs = self.rag(
+ input_ids,
+ attention_mask=attention_mask,
+ encoder_outputs=encoder_outputs,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ context_input_ids=context_input_ids,
+ context_attention_mask=context_attention_mask,
+ doc_scores=doc_scores,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ output_retrieved=output_retrieved,
+ n_docs=n_docs,
+ training=training,
+ )
+
+ loss = None
+ logits = outputs.logits
+ if labels is not None:
+ assert decoder_input_ids is not None
+ loss = self.get_nll(
+ outputs.logits,
+ outputs.doc_scores,
+ labels,
+ reduce_loss=reduce_loss,
+ epsilon=self.config.label_smoothing,
+ n_docs=n_docs,
+ )
+
+ if do_marginalize:
+ logits = self.marginalize(logits, outputs.doc_scores, n_docs)
+
+ return TFRetrievAugLMMarginOutput(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ doc_scores=outputs.doc_scores,
+ context_input_ids=outputs.context_input_ids,
+ context_attention_mask=outputs.context_attention_mask,
+ retrieved_doc_embeds=outputs.retrieved_doc_embeds,
+ retrieved_doc_ids=outputs.retrieved_doc_ids,
+ question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,
+ question_enc_hidden_states=outputs.question_enc_hidden_states,
+ question_enc_attentions=outputs.question_enc_attentions,
+ generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,
+ generator_enc_hidden_states=outputs.generator_enc_hidden_states,
+ generator_enc_attentions=outputs.generator_enc_attentions,
+ generator_dec_hidden_states=outputs.generator_dec_hidden_states,
+ generator_dec_attentions=outputs.generator_dec_attentions,
+ )
+
+ def generate(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: tf.Tensor | None = None,
+ context_input_ids=None,
+ context_attention_mask=None,
+ doc_scores=None,
+ n_docs=None,
+ generation_config=None,
+ logits_processor=TFLogitsProcessorList(),
+ **kwargs,
+ ):
+ """
+ Implements TFRAG token decoding.
+
+ Args:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ The sequence used as a prompt for the generation. If `input_ids` is not passed, then
+ `context_input_ids` has to be provided.
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
+ retriever.
+
+ If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the
+ forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
+ context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
+ retriever.
+
+ If the model is not initialized with a `retriever`, `context_attention_mask` has to be provided to the
+ forward pass. `context_attention_mask` is returned by [`~RagRetriever.__call__`].
+ doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`):
+ Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
+ `question_encoder_last_hidden_state`.
+
+ If the model is not initialized with a `retriever`, `doc_scores` has to be provided to the forward
+ pass. `doc_scores` can be computed via `question_encoder_last_hidden_state` and the `retrieved_doc_embeds`
+ returned by [`~RagRetriever.__call__`].
+ n_docs (`int`, *optional*, defaults to `config.n_docs`):
+ Number of documents to retrieve and/or number of documents for which to generate an answer.
+ generation_config (`~generation.GenerationConfig`, *optional*):
+ The generation configuration to be used as base parametrization for the generation call. `**kwargs`
+ passed to generate matching the attributes of `generation_config` will override them. If
+ `generation_config` is not provided, the default will be used, which had the following loading
+ priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
+ configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
+ default values, whose documentation should be checked to parameterize generation.
+ logits_processor (`TFLogitsProcessorList`, *optional*):
+ Custom logits processors that complement the default logits processors built from arguments and a
+ model's config. If a logits processor is passed that is already created with the arguments or a model's
+ config, an error is thrown.
+ kwargs (`Dict[str, Any]`, *optional*):
+ Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
+ forwarded to the `forward` function of the model.
+
+ Return:
+ `tf.Tensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The
+ second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early
+ due to the `eos_token_id`.
+ """
+ # Handle `generation_config` and kwargs that might update it
+ if generation_config is None:
+ generation_config = self.generation_config
+ generation_config = copy.deepcopy(generation_config)
+ model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs
+
+ # set default parameters
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+
+ # retrieve docs
+ if self.retriever is not None and context_input_ids is None:
+ question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]
+ out = self.retriever(
+ input_ids,
+ question_hidden_states.numpy().astype(np.float32),
+ prefix=self.generator.config.prefix,
+ n_docs=n_docs,
+ return_tensors="tf",
+ )
+ context_input_ids, context_attention_mask, retrieved_doc_embeds = (
+ out["context_input_ids"],
+ out["context_attention_mask"],
+ out["retrieved_doc_embeds"],
+ )
+
+ context_input_ids = tf.cast(context_input_ids, tf.int32)
+ context_attention_mask = tf.cast(context_attention_mask, tf.int32)
+ retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32)
+
+ # compute doc_scores
+ doc_scores = tf.matmul(
+ tf.expand_dims(question_hidden_states, axis=1), retrieved_doc_embeds, transpose_b=True
+ )
+ doc_scores = tf.squeeze(doc_scores, axis=1)
+
+ assert (context_input_ids.shape[0] % n_docs) == 0, (
+ f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is"
+ f" {context_input_ids.shape[0]}."
+ )
+
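+ # context_input_ids stacks n_docs retrieved passages per question, so the effective batch size is its
+ # leading dimension divided by n_docs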
+ batch_size = context_input_ids.shape[0] // n_docs
+
+ encoder = self.rag.generator.get_encoder()
+ encoder_outputs = encoder(
+ input_ids=context_input_ids,
+ attention_mask=context_attention_mask,
+ output_attentions=generation_config.output_attentions,
+ output_hidden_states=generation_config.output_hidden_states,
+ return_dict=True,
+ )
+
+ decoder_input_ids = tf.fill(
+ (batch_size * generation_config.num_beams, 1),
+ tf.cast(generation_config.decoder_start_token_id, tf.int32),
+ )
+ last_hidden_state = encoder_outputs["last_hidden_state"]
+
+ def extend_enc_output(tensor, num_beams=None):
+ """
+ Broadcast the tensor over `num_beams` replicas, preserving order. Input: tensor of shape
+ `(batch_size * n_docs, d)`. Output: tensor of shape `(batch_size * num_beams * n_docs, d)`.
+ """
+
+ # expand batch_size & num_beam dimensions
+ d_shape_list = tensor.shape[1:]
+
+ # split n_docs dimensions
+ new_shape = (batch_size, 1, n_docs) + d_shape_list
+ tensor = tf.reshape(tensor, new_shape)
+
+ # repeat same last hidden states over `num_beams` dimension
+ new_shape = (batch_size, num_beams, n_docs) + d_shape_list
+ tensor = tf.broadcast_to(tensor, new_shape)
+
+ # merge `batch_size`, `num_beams`, `num_docs` dims again
+ new_shape = (batch_size * num_beams * n_docs,) + d_shape_list
+ return tf.reshape(tensor, new_shape)
+
+ # correctly extend last_hidden_state and attention mask
+ context_attention_mask = extend_enc_output(context_attention_mask, num_beams=generation_config.num_beams)
+ encoder_outputs["last_hidden_state"] = extend_enc_output(
+ last_hidden_state, num_beams=generation_config.num_beams
+ )
+
+ doc_scores = tf.repeat(doc_scores, generation_config.num_beams, axis=0)
+
+ # define start_len & additional parameters
+ model_kwargs["doc_scores"] = doc_scores
+ model_kwargs["encoder_outputs"] = encoder_outputs
+ model_kwargs["attention_mask"] = context_attention_mask
+ model_kwargs["n_docs"] = n_docs
+
+ pre_processor = self._get_logits_processor(
+ generation_config=generation_config,
+ input_ids_seq_length=tf.shape(decoder_input_ids)[-1],
+ logits_processor=logits_processor,
+ )
+
+ if generation_config.num_beams == 1:
+ return self.greedy_search(
+ input_ids=decoder_input_ids,
+ max_length=generation_config.max_length,
+ pad_token_id=generation_config.pad_token_id,
+ eos_token_id=generation_config.eos_token_id,
+ logits_processor=pre_processor,
+ output_attentions=generation_config.output_attentions,
+ output_hidden_states=generation_config.output_hidden_states,
+ output_scores=generation_config.output_scores,
+ return_dict_in_generate=generation_config.return_dict_in_generate,
+ **model_kwargs,
+ )
+ elif generation_config.num_beams > 1:
+ if generation_config.num_beams < generation_config.num_return_sequences:
+ raise ValueError(
+ "Beam search decoding cannot return more sequences than it has beams. Please set num_beams >="
+ f" num_return_sequences, got {generation_config.num_beams} and"
+ f" {generation_config.num_return_sequences} (respectivelly)"
+ )
+
+ def unflatten_beam_dim(tensor):
+ """Unflattens the first, flat batch*beam dimension of a non-scalar array."""
+ shape = shape_list(tensor)
+ return tf.reshape(tensor, [-1, generation_config.num_beams] + shape[1:])
+
+ decoder_input_ids = unflatten_beam_dim(decoder_input_ids)
+ model_kwargs["attention_mask"] = unflatten_beam_dim(model_kwargs["attention_mask"])
+ model_kwargs["encoder_outputs"]["last_hidden_state"] = unflatten_beam_dim(
+ model_kwargs["encoder_outputs"]["last_hidden_state"]
+ )
+
+ return self.beam_search(
+ input_ids=decoder_input_ids,
+ max_length=generation_config.max_length,
+ pad_token_id=generation_config.pad_token_id,
+ eos_token_id=generation_config.eos_token_id,
+ logits_processor=pre_processor,
+ output_attentions=generation_config.output_attentions,
+ output_hidden_states=generation_config.output_hidden_states,
+ output_scores=generation_config.output_scores,
+ return_dict_in_generate=generation_config.return_dict_in_generate,
+ **model_kwargs,
+ )
+ else:
+ raise ValueError(
+ f"`num_beams` has to be an integer strictly superior to 0 (≥ 1), but is {generation_config.num_beams}"
+ )
+
+ def get_input_embeddings(self):
+ return self.rag.generator.get_input_embeddings()
+
+ def get_output_embeddings(self):
+ return self.rag.generator.get_output_embeddings()
+
+ # Adapted from tf_t5's & tf_bart's _shift_right
+ def shift_tokens_right(self, input_ids, start_token_id=None):
+ """Shift input ids one token to the right, and pad with start_token_id"""
+
+ if start_token_id is None:
+ start_token_id = self.generator.config.decoder_start_token_id
+ assert start_token_id is not None, (
+ "self.generator.config.decoder_start_token_id has to be defined. In Rag we commonly use Bart as"
+ " generator, see Bart docs for more information"
+ )
+
+ pad_token_id = self.generator.config.pad_token_id
+ assert pad_token_id is not None, "self.generator.config.pad_token_id has to be defined."
+
+ start_tokens = tf.fill((shape_list(input_ids)[0], 1), tf.cast(start_token_id, input_ids.dtype))
+ shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
+
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids = tf.where(
+ shifted_input_ids == -100,
+ tf.fill(shape_list(shifted_input_ids), tf.cast(pad_token_id, input_ids.dtype)),
+ shifted_input_ids,
+ )
+
+ # "Verify that `labels` has only positive values and -100"
+ assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.cast(0, shifted_input_ids.dtype))
+
+ # Make sure the assertion op is called by wrapping the result in an identity no-op
+ with tf.control_dependencies([assert_gte0]):
+ shifted_input_ids = tf.identity(shifted_input_ids)
+
+ return shifted_input_ids
+
+ # nll stands for 'negative log likelihood'
+ def get_nll(self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, n_docs=None):
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+ # shift tokens left (as in the original PyTorch version)
+
+ target = tf.concat(
+ [target[:, 1:], tf.fill([target.shape[0], 1], tf.cast(self.config.generator.pad_token_id, target.dtype))],
+ axis=1,
+ )
+ rag_logprobs = self.marginalize(seq_logits, doc_scores, n_docs)
+ loss = self.hf_compute_loss(target, rag_logprobs, from_logits=True, reduce_loss=reduce_loss)
+
+ return loss
+
+ # Adapted from modeling_tf_bart; adds smooth_loss to match the PyTorch version
+ def hf_compute_loss(self, labels, y_pred, smooth_epsilon=0.0, from_logits=True, reduce_loss=False):
+ """CrossEntropyLoss that ignores pad tokens"""
+ # Matt: As written, this loss is not XLA-compatible, but it's doing some very weird things
+ # and I don't feel comfortable converting it.
+ loss_fn = keras.losses.SparseCategoricalCrossentropy(
+ from_logits=True,
+ reduction=keras.losses.Reduction.SUM,
+ )
+
+ if from_logits is False: # convert to logits
+ eps = 1e-9
+ y_pred = tf.clip_by_value(y_pred, clip_value_min=eps, clip_value_max=1 - eps)
+ y_pred = tf.math.log(y_pred)
+
+ logits = y_pred
+ melted_labels = tf.reshape(labels, (-1,))
+ active_loss = tf.not_equal(melted_labels, self.config.generator.pad_token_id)
+
+ reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, logits.shape[2])), active_loss)
+ labels = tf.boolean_mask(melted_labels, active_loss)
+ nll_loss = loss_fn(labels, reduced_logits)
+
+ smooth_loss = -tf.reduce_sum(reduced_logits, axis=-1)
+ smooth_loss = tf.reduce_sum(smooth_loss) # sum and squeeze like torch
+ eps_i = smooth_epsilon / reduced_logits.shape[-1]
+
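+ # label smoothing: blend the NLL with a term spread uniformly over the vocabulary (a no-op when smooth_epsilon == 0.0)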
+ loss = (1.0 - smooth_epsilon) * nll_loss + eps_i * smooth_loss
+
+ return loss
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "rag", None) is not None:
+ with tf.name_scope(self.rag.name):
+ self.rag.build(None)
+
+
+@add_start_docstrings_to_model_forward(
+ """
+ A TF RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass.
+ """,
+ RAG_START_DOCSTRING,
+)
+class TFRagSequenceForGeneration(TFRagPreTrainedModel, TFCausalLanguageModelingLoss):
+ load_weight_prefix = "tf_rag_sequence_for_generation_1/rag"
+
+ def __init__(
+ self,
+ config: Optional[PretrainedConfig] = None,
+ question_encoder: Optional[TFPreTrainedModel] = None,
+ generator: Optional[TFPreTrainedModel] = None,
+ retriever: Optional[RagRetriever] = None,
+ **kwargs,
+ ):
+ assert config is not None or (
+ question_encoder is not None and generator is not None
+ ), "Either a configuration or an encoder and a generator has to be provided."
+
+ if config is None:
+ config = RagConfig.from_question_encoder_generator_configs(
+ question_encoder.config, generator.config, **kwargs
+ )
+
+ super().__init__(config)
+
+ # instantiate model
+ self.rag = TFRagModel(
+ config=config,
+ question_encoder=question_encoder,
+ generator=generator,
+ retriever=retriever,
+ load_weight_prefix=self.load_weight_prefix,
+ name="rag",
+ )
+
+ def set_retriever(self, retriever: RagRetriever):
+ self.rag.retriever = retriever
+
+ @property
+ def retriever(self):
+ return self.rag.retriever
+
+ @property
+ def generator(self):
+ return self.rag.generator
+
+ @property
+ def question_encoder(self):
+ return self.rag.question_encoder
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFRetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ decoder_input_ids: np.ndarray | tf.Tensor | None = None,
+ decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
+ encoder_outputs: np.ndarray | tf.Tensor | None = None,
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
+ doc_scores: np.ndarray | tf.Tensor | None = None,
+ context_input_ids: np.ndarray | tf.Tensor | None = None,
+ context_attention_mask: np.ndarray | tf.Tensor | None = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_retrieved: Optional[bool] = None,
+ n_docs: Optional[int] = None,
+ exclude_bos_score: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ reduce_loss: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ **kwargs, # needs kwargs for generation
+ ) -> Union[Tuple[tf.Tensor], TFRetrievAugLMMarginOutput]:
+ r"""
+ exclude_bos_score (`bool`, *optional*):
+ Only relevant if `labels` is passed. If `True`, the score of the BOS token is disregarded when computing
+ the loss.
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the cross entropy classification loss according to the Rag-Sequence model formulation. See
+ https://arxiv.org/pdf/2005.11401.pdf Section 2.1 for details about the Rag-Sequence formulation. Indices should
+ be in `[0, ..., config.vocab_size - 1]`.
+ reduce_loss (`bool`, *optional*):
+ Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `tf.Tensor.sum`
+ operation.
+ kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
+ Legacy dictionary, which is required so that the model can use the *generate()* function.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, RagRetriever, TFRagSequenceForGeneration
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq")
+ >>> retriever = RagRetriever.from_pretrained(
+ ... "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
+ ... )
+ >>> # initialize with RagRetriever to do everything in one forward call
+ >>> model = TFRagSequenceForGeneration.from_pretrained(
+ ... "facebook/rag-sequence-nq", retriever=retriever, from_pt=True
+ ... )
+
+ >>> input_dict = tokenizer.prepare_seq2seq_batch(
+ ... "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf"
+ ... )
+ >>> outputs = model(input_dict, output_retrieved=True)
+
+ >>> # or use retriever separately
+ >>> # 1. Encode
+ >>> input_ids = input_dict["input_ids"]
+ >>> question_hidden_states = model.question_encoder(input_ids)[0]
+ >>> # 2. Retrieve
+ >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf")
+ >>> doc_scores = tf.squeeze(
+ ... tf.matmul(
+ ... tf.expand_dims(question_hidden_states, axis=1), docs_dict["retrieved_doc_embeds"], transpose_b=True
+ ... ),
+ ... axis=1,
+ ... )
+ >>> # 3. Forward to generator
+ >>> outputs = model(
+ ... inputs=None,
+ ... context_input_ids=docs_dict["context_input_ids"],
+ ... context_attention_mask=docs_dict["context_attention_mask"],
+ ... doc_scores=doc_scores,
+ ... decoder_input_ids=input_dict["labels"],
+ ... )
+
+ >>> # or directly generate
+ >>> generated = model.generate(
+ ... context_input_ids=docs_dict["context_input_ids"],
+ ... context_attention_mask=docs_dict["context_attention_mask"],
+ ... doc_scores=doc_scores,
+ ... )
+ >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)
+ ```"""
+
+ assert (
+ "decoder_cached_states" not in kwargs
+ ), "Please use past_key_values to cache intermediate outputs" # from modeling_tf_bart.py
+
+ exclude_bos_score = exclude_bos_score if exclude_bos_score is not None else self.config.exclude_bos_score
+ reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss
+
+ if labels is not None:
+ if decoder_input_ids is None:
+ decoder_input_ids = labels
+ use_cache = False
+
+ outputs = self.rag(
+ input_ids,
+ attention_mask=attention_mask,
+ encoder_outputs=encoder_outputs,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ context_input_ids=context_input_ids,
+ context_attention_mask=context_attention_mask,
+ doc_scores=doc_scores,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ output_retrieved=output_retrieved,
+ n_docs=n_docs,
+ training=training,
+ )
+
+ loss = None
+ if labels is not None:
+ loss = self.get_nll(
+ outputs.logits,
+ outputs.doc_scores,
+ labels,
+ reduce_loss=reduce_loss,
+ epsilon=self.config.label_smoothing,
+ n_docs=n_docs,
+ )
+
+ return TFRetrievAugLMMarginOutput(
+ loss=loss,
+ logits=outputs.logits,
+ doc_scores=outputs.doc_scores,
+ past_key_values=outputs.past_key_values,
+ context_input_ids=outputs.context_input_ids,
+ context_attention_mask=outputs.context_attention_mask,
+ retrieved_doc_embeds=outputs.retrieved_doc_embeds,
+ retrieved_doc_ids=outputs.retrieved_doc_ids,
+ question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,
+ question_enc_hidden_states=outputs.question_enc_hidden_states,
+ question_enc_attentions=outputs.question_enc_attentions,
+ generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,
+ generator_enc_hidden_states=outputs.generator_enc_hidden_states,
+ generator_enc_attentions=outputs.generator_enc_attentions,
+ generator_dec_hidden_states=outputs.generator_dec_hidden_states,
+ generator_dec_attentions=outputs.generator_dec_attentions,
+ )
+
+ def get_nll(
+ self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, exclude_bos_score=False, n_docs=None
+ ):
+ # shift tokens left
+ target = tf.concat(
+ [target[:, 1:], tf.fill([target.shape[0], 1], tf.cast(self.config.generator.pad_token_id, target.dtype))],
+ axis=1,
+ )
+
+ # bos_token_id is None for T5
+ bos_token_id = self.config.bos_token_id or self.config.generator.bos_token_id
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+ equal_bos_token_id_all = tf.reduce_all(tf.equal(target[:, 0], bos_token_id))
+ use_bos = bos_token_id is not None and equal_bos_token_id_all
+
+ def _mask_pads(ll, smooth_obj):
+ pad_mask = tf.equal(target, tf.cast(self.config.generator.pad_token_id, target.dtype))
+ if tf.reduce_any(pad_mask):
+ ll = tf.where(pad_mask, 0.0, ll)
+ smooth_obj = tf.where(pad_mask, 0.0, smooth_obj)
+ return tf.squeeze(ll, axis=-1), tf.squeeze(smooth_obj, axis=-1)
+
+ # seq_logits.shape = (batch*n_docs, tgt_len , vocabs)
+ seq_logprobs = tf.nn.log_softmax(seq_logits, axis=-1)
+ seq_logprobs = tf.reshape(
+ seq_logprobs, (seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.shape[-1])
+ ) # (batch_size, n_docs, tgt_len, vocabs)
+ doc_logprobs = tf.nn.log_softmax(doc_scores, axis=1)
+ doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1)
+ doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) # done twice to get 4-D
+
+ # RAG-sequence marginalization
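+ # the document log-probs are added once, at the second token position, so that summing per-token log-probs
+ # later yields log p(y|x, doc) + log p(doc|x) per document; the first (BOS) position is left unweighted so
+ # that its score can be excluded via `exclude_bos_score`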
+ first_token_scores = seq_logprobs[:, :, :1, :]
+ second_token_scores = seq_logprobs[:, :, 1:2, :]
+ remainder = seq_logprobs[:, :, 2:, :]
+ rag_logprobs = tf.concat([first_token_scores, second_token_scores + doc_logprobs, remainder], axis=2)
+
+ # calculate loss
+ target = tf.expand_dims(target, axis=1) # n_docs dimension
+ target = tf.expand_dims(target, axis=-1) # logits dimension
+ target = tf.repeat(target, n_docs, axis=1)
+ assert len(target.shape) == len(rag_logprobs.shape)
+
+ # last-axis gathering only - use 2D-reshape-trick for Torch's style nD gathering
+ def torch_gather(param, id_tensor):
+ # 2d-gather torch equivalent: https://stackoverflow.com/questions/52129909/tensorflow-equivalent-of-torch-gather
+ def gather2d(target, id_tensor):
+ idx = tf.stack([tf.range(tf.shape(id_tensor)[0], dtype=id_tensor.dtype), id_tensor[:, 0]], axis=-1)
+ result = tf.gather_nd(target, idx)
+ return tf.expand_dims(result, axis=-1)
+
+ target = tf.reshape(param, (-1, param.shape[-1])) # reshape 2D
+ target_shape = id_tensor.shape
+
+ id_tensor = tf.reshape(id_tensor, (-1, 1)) # also 2D-index
+ result = gather2d(target, id_tensor)
+ return tf.reshape(result, target_shape)
+
+ ll = torch_gather(rag_logprobs, id_tensor=target)
+ smooth_obj = tf.reduce_sum(rag_logprobs, axis=-1, keepdims=True) # total sum of all (normalised) logits
+
+ ll, smooth_obj = _mask_pads(ll, smooth_obj)
+
+ # sum over tokens, exclude bos while scoring
+ if exclude_bos_score and use_bos:
+ ll = tf.reduce_sum(ll[:, :, 1:], axis=2)
+ else:
+ ll = tf.reduce_sum(ll, axis=2)
+
+ smooth_obj = tf.reduce_sum(smooth_obj, axis=2)
+ ll = tf.math.reduce_logsumexp(ll, axis=1) # logsumexp over docs
+ smooth_obj = tf.math.reduce_logsumexp(smooth_obj, axis=1)
+
+ nll_loss = -ll
+ smooth_loss = -smooth_obj
+
+ if reduce_loss:
+ nll_loss = tf.reduce_sum(nll_loss)
+ smooth_loss = tf.reduce_sum(smooth_loss)
+
+ eps_i = epsilon / rag_logprobs.shape[-1]
+ loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
+ return loss
+
+ def generate(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: tf.Tensor | None = None,
+ context_input_ids=None,
+ context_attention_mask=None,
+ doc_scores=None,
+ do_deduplication=None, # defaults to True
+ num_return_sequences=None, # defaults to 1
+ num_beams=None, # defaults to 1
+ n_docs=None,
+ **model_kwargs,
+ ):
+ """
+ Implements RAG sequence "thorough" decoding. Read the [`~generation.GenerationMixin.generate`] documentation
+ for more information on how to set other generate input parameters.
+
+ Args:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ The sequence used as a prompt for the generation. If `input_ids` is not passed, then
+ `context_input_ids` has to be provided.
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Input IDs post-processed from the retrieved documents and the question encoder input_ids by the
+ retriever.
+ context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
+ retriever. If the model is not initialized with a `retriever` or `input_ids` is not given,
+ `context_input_ids` and `context_attention_mask` have to be provided to the forward pass. They are
+ returned by [`~RagRetriever.__call__`].
+ doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`):
+ Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
+ `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever` or
+ `input_ids` is not given, `doc_scores` has to be provided to the forward pass. `doc_scores` are
+ returned by [`~RagRetriever.__call__`].
+ do_deduplication (`bool`, *optional*):
+ Whether or not to deduplicate the generations from different context documents for a given input. Has
+ to be set to `False` if used while training with a distributed backend.
+ num_return_sequences (`int`, *optional*, defaults to 1):
+ The number of independently computed returned sequences for each element in the batch. Note that this
+ is not the value we pass to the `generator`'s [`~generation.GenerationMixin.generate`] function,
+ where we set `num_return_sequences` to `num_beams`.
+ num_beams (`int`, *optional*, defaults to 1):
+ Number of beams for beam search. 1 means no beam search.
+ n_docs (`int`, *optional*, defaults to `config.n_docs`):
+ Number of documents to retrieve and/or number of documents for which to generate an answer.
+ kwargs (`Dict[str, Any]`, *optional*):
+ Additional kwargs will be passed to [`~generation.GenerationMixin.generate`]
+
+ Return:
+ `tf.Tensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The
+ second dimension (sequence length) is either equal to `max_length` or shorter if all batches finished early
+ due to the `eos_token_id`.
+ """
+
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+ do_deduplication = do_deduplication if do_deduplication is not None else self.config.do_deduplication
+ num_doc_return_sequences = (
+ num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
+ )
+ num_beams = num_beams if num_beams is not None else self.config.num_beams
+
+ assert (
+ input_ids is not None or context_input_ids is not None
+ ), " At least one of input_ids or context_input_ids must be given"
+
+ if self.retriever is not None and context_input_ids is None:
+ question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]
+ context_input_ids = self.retriever(
+ input_ids,
+ question_hidden_states.numpy(),
+ prefix=self.generator.config.prefix,
+ n_docs=n_docs,
+ return_tensors="tf",
+ )["context_input_ids"]
+
+ hypos = []
+ model_kwargs["num_beams"] = num_beams
+ model_kwargs["num_return_sequences"] = num_beams # put here so it is not confused with num_doc_return_sequences
+ model_kwargs["attention_mask"] = None
+
+ batch_size = input_ids.shape[0] if input_ids is not None else context_input_ids.shape[0] // n_docs
+
+ for index in range(batch_size):
+ # first, generate beams from documents:
+ generator_input_ids = context_input_ids[index * n_docs : (index + 1) * n_docs] # (n_docs, max_len)
+
+ output_sequences = self.generator.generate(
+ generator_input_ids,
+ **model_kwargs,
+ ) # n_docs * n_beam, tgt_len
+ if do_deduplication:
+ # do_deduplication -- for TF, works in eager mode only!
+ output_sequences = tf.stack(list({str(k.numpy().tolist()): k for k in output_sequences}.values()))
+
+ num_candidates = output_sequences.shape[
+ 0
+ ] # after deduplication, this number can be less than n_docs*n_beam
+
+ # then, run model forwards to get nll scores:
+ if input_ids is not None:
+ new_input_ids = tf.tile(input_ids[index : index + 1], (num_candidates, 1))
+ outputs = self(new_input_ids, labels=output_sequences, exclude_bos_score=True)
+ else: # input_ids is None, need context_input_ids/mask and doc_scores
+ assert context_attention_mask is not None, (
+ "Make sure that `context_attention_mask` is passed if no `input_ids` are given. Alternatively, you"
+ " can set a retriever using the `set_retriever(...)` function."
+ )
+ assert doc_scores is not None, (
+ "Make sure that `doc_scores` is passed if no `input_ids` are given. Alternatively, you can set a"
+ " retriever using the `set_retriever(...)` function."
+ )
+
+ individual_input_ids = tf.tile(
+ generator_input_ids, (num_candidates, 1)
+ ) # (num_candidates*n_docs, max_len)
+
+ individual_attention_mask = context_attention_mask[index * n_docs : (index + 1) * n_docs]
+ individual_attention_mask = tf.tile(individual_attention_mask, (num_candidates, 1))
+
+ individual_doc_scores = doc_scores[index : (index + 1), :] # doc_scores.shape = [batch, n_docs]
+ individual_doc_scores = tf.tile(individual_doc_scores, (num_candidates, 1)) # [num_candidates, n_docs]
+
+ outputs = self(
+ input_ids=None,
+ context_input_ids=individual_input_ids,
+ context_attention_mask=individual_attention_mask,
+ doc_scores=individual_doc_scores,
+ labels=output_sequences,
+ exclude_bos_score=True,
+ )
+
+ top_cand_inds = tf.math.top_k((-outputs["loss"]), k=num_doc_return_sequences)[1]
+
+ # add hypothesis
+ hypos.append(tf.gather(output_sequences, top_cand_inds))
+
+ return self._cat_and_pad(hypos, pad_token_id=self.config.generator.pad_token_id)
+
+ @staticmethod
+ def _cat_and_pad(tensors, pad_token_id):
+ # used by generate(): tensors is a (batched) list of (candidates, len); len varies across the batch
+
+ # Initialize the padded tensor with shape (all_candidates, max_candidate_length),
+ # where all_candidates is the total number of candidates across all inputs
+ new_shape = sum([t.shape[0] for t in tensors]), max([t.shape[1] for t in tensors])
+ output = tf.fill(new_shape, pad_token_id)
+
+ # Normal tensor doesn't support slice assignment, so we need tf.Variable
+ output = tf.Variable(output)
+
+ # Assign, and then convert back to tensor
+ ind = 0
+ for t in tensors:
+ output[ind : ind + t.shape[0], : t.shape[1]].assign(t)
+ ind += t.shape[0]
+
+ output = tf.convert_to_tensor(output)
+ return tf.cast(output, tensors[0][0][0].dtype)
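A tiny, hedged illustration of what `_cat_and_pad` above produces (the candidate tensors and `pad_token_id=0` below are made up, and the private static method is only called directly here for demonstration): per-question candidate batches of different lengths are concatenated into one right-padded matrix.

```python
import tensorflow as tf

from transformers import TFRagSequenceForGeneration

a = tf.constant([[5, 6, 7]])            # 1 candidate of length 3 for the first question
b = tf.constant([[8, 9], [10, 11]])     # 2 candidates of length 2 for the second question

padded = TFRagSequenceForGeneration._cat_and_pad([a, b], pad_token_id=0)
# padded ->
# [[ 5,  6,  7],
#  [ 8,  9,  0],
#  [10, 11,  0]]
```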
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "rag", None) is not None:
+ with tf.name_scope(self.rag.name):
+ self.rag.build(None)
+
+
+__all__ = ["TFRagModel", "TFRagPreTrainedModel", "TFRagSequenceForGeneration", "TFRagTokenForGeneration"]
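A hedged usage sketch of the "thorough" decoding path implemented by `generate` above; the checkpoint names are the standard public RAG ones and are only illustrative, not part of the vendored file.

```python
from transformers import RagRetriever, RagTokenizer, TFRagSequenceForGeneration

tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
retriever = RagRetriever.from_pretrained(
    "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
)
model = TFRagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever)

inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="tf")

# generate() retrieves n_docs passages per question, decodes `num_beams` hypotheses per passage,
# optionally deduplicates them, re-scores every candidate with the full model, and keeps the
# `num_return_sequences` best ones.
generated = model.generate(
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    num_beams=2,
    num_return_sequences=1,
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```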
diff --git a/janus/lib/python3.10/site-packages/transformers/models/rag/retrieval_rag.py b/janus/lib/python3.10/site-packages/transformers/models/rag/retrieval_rag.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4000aa6e7f6710957352fbd483ee81c90284eda
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/rag/retrieval_rag.py
@@ -0,0 +1,677 @@
+# coding=utf-8
+# Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""RAG Retriever model implementation."""
+
+import os
+import pickle
+import time
+from typing import Iterable, List, Optional, Tuple
+
+import numpy as np
+
+from ...tokenization_utils import PreTrainedTokenizer
+from ...tokenization_utils_base import BatchEncoding
+from ...utils import cached_file, is_datasets_available, is_faiss_available, logging, requires_backends, strtobool
+from .configuration_rag import RagConfig
+from .tokenization_rag import RagTokenizer
+
+
+if is_datasets_available():
+ from datasets import Dataset, load_dataset, load_from_disk
+
+if is_faiss_available():
+ import faiss
+
+
+logger = logging.get_logger(__name__)
+
+
+LEGACY_INDEX_PATH = "https://storage.googleapis.com/huggingface-nlp/datasets/wiki_dpr/"
+
+
+class Index:
+ """
+ A base class for the Indices encapsulated by the [`RagRetriever`].
+ """
+
+ def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]:
+ """
+ Returns a list of dictionaries, containing titles and text of the retrieved documents.
+
+ Args:
+ doc_ids (`np.ndarray` of shape `(batch_size, n_docs)`):
+ A tensor of document indices.
+ """
+ raise NotImplementedError
+
+ def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]:
+ """
+ For each query in the batch, retrieves `n_docs` documents.
+
+ Args:
+ question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`):
+ An array of query vectors.
+ n_docs (`int`):
+ The number of docs retrieved per query.
+
+ Returns:
+ `np.ndarray` of shape `(batch_size, n_docs)`: A tensor of indices of retrieved documents. `np.ndarray` of
+ shape `(batch_size, vector_size)`: A tensor of vector representations of retrieved documents.
+ """
+ raise NotImplementedError
+
+ def is_initialized(self):
+ """
+ Returns `True` if index is already initialized.
+ """
+ raise NotImplementedError
+
+ def init_index(self):
+ """
+ A function responsible for loading the index into memory. Should be called only once per training run of a RAG
+ model. E.g. if the model is trained on multiple GPUs in a distributed setup, only one of the workers will load
+ the index.
+ """
+ raise NotImplementedError
+
+
+class LegacyIndex(Index):
+ """
+ An index which can be deserialized from the files built using https://github.com/facebookresearch/DPR. We use
+ default faiss index parameters as specified in that repository.
+
+ Args:
+ vector_size (`int`):
+ The dimension of indexed vectors.
+ index_path (`str`):
+ A path to a *directory* containing index files compatible with [`~models.rag.retrieval_rag.LegacyIndex`]
+ """
+
+ INDEX_FILENAME = "hf_bert_base.hnswSQ8_correct_phi_128.c_index"
+ PASSAGE_FILENAME = "psgs_w100.tsv.pkl"
+
+ def __init__(self, vector_size, index_path):
+ self.index_id_to_db_id = []
+ self.index_path = index_path
+ self.passages = self._load_passages()
+ self.vector_size = vector_size
+ self.index = None
+ self._index_initialized = False
+
+ def _resolve_path(self, index_path, filename):
+ is_local = os.path.isdir(index_path)
+ try:
+ # Load from URL or cache if already cached
+ resolved_archive_file = cached_file(index_path, filename)
+ except EnvironmentError:
+ msg = (
+ f"Can't load '{filename}'. Make sure that:\n\n"
+ f"- '{index_path}' is a correct remote path to a directory containing a file named {filename}\n\n"
+ f"- or '{index_path}' is the correct path to a directory containing a file named {filename}.\n\n"
+ )
+ raise EnvironmentError(msg)
+ if is_local:
+ logger.info(f"loading file {resolved_archive_file}")
+ else:
+ logger.info(f"loading file {filename} from cache at {resolved_archive_file}")
+ return resolved_archive_file
+
+ def _load_passages(self):
+ logger.info(f"Loading passages from {self.index_path}")
+ passages_path = self._resolve_path(self.index_path, self.PASSAGE_FILENAME)
+ if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")):
+ raise ValueError(
+ "This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially "
+ "malicious. It's recommended to never unpickle data that could have come from an untrusted source, or "
+ "that could have been tampered with. If you already verified the pickle data and decided to use it, "
+ "you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it."
+ )
+ with open(passages_path, "rb") as passages_file:
+ passages = pickle.load(passages_file)
+ return passages
+
+ def _deserialize_index(self):
+ logger.info(f"Loading index from {self.index_path}")
+ resolved_index_path = self._resolve_path(self.index_path, self.INDEX_FILENAME + ".index.dpr")
+ self.index = faiss.read_index(resolved_index_path)
+ resolved_meta_path = self._resolve_path(self.index_path, self.INDEX_FILENAME + ".index_meta.dpr")
+ if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")):
+ raise ValueError(
+ "This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially "
+ "malicious. It's recommended to never unpickle data that could have come from an untrusted source, or "
+ "that could have been tampered with. If you already verified the pickle data and decided to use it, "
+ "you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it."
+ )
+ with open(resolved_meta_path, "rb") as metadata_file:
+ self.index_id_to_db_id = pickle.load(metadata_file)
+ assert (
+ len(self.index_id_to_db_id) == self.index.ntotal
+ ), "Deserialized index_id_to_db_id should match faiss index size"
+
+ def is_initialized(self):
+ return self._index_initialized
+
+ def init_index(self):
+ index = faiss.IndexHNSWFlat(self.vector_size + 1, 512)
+ index.hnsw.efSearch = 128
+ index.hnsw.efConstruction = 200
+ self.index = index
+ self._deserialize_index()
+ self._index_initialized = True
+
+ def get_doc_dicts(self, doc_ids: np.ndarray):
+ doc_list = []
+ for doc_ids_i in doc_ids:
+ ids = [str(int(doc_id)) for doc_id in doc_ids_i]
+ docs = [self.passages[doc_id] for doc_id in ids]
+ doc_list.append(docs)
+ doc_dicts = []
+ for docs in doc_list:
+ doc_dict = {}
+ doc_dict["title"] = [doc[1] for doc in docs]
+ doc_dict["text"] = [doc[0] for doc in docs]
+ doc_dicts.append(doc_dict)
+ return doc_dicts
+
+ def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]:
+ aux_dim = np.zeros(len(question_hidden_states), dtype="float32").reshape(-1, 1)
+ query_hnsw_vectors = np.hstack((question_hidden_states, aux_dim))
+ _, docs_ids = self.index.search(query_hnsw_vectors, n_docs)
+ vectors = [[self.index.reconstruct(int(doc_id))[:-1] for doc_id in doc_ids] for doc_ids in docs_ids]
+ ids = [[int(self.index_id_to_db_id[doc_id]) for doc_id in doc_ids] for doc_ids in docs_ids]
+ return np.array(ids), np.array(vectors)
+
+
+class HFIndexBase(Index):
+ def __init__(self, vector_size, dataset, index_initialized=False):
+ self.vector_size = vector_size
+ self.dataset = dataset
+ self._index_initialized = index_initialized
+ self._check_dataset_format(with_index=index_initialized)
+ dataset.set_format("numpy", columns=["embeddings"], output_all_columns=True, dtype="float32")
+
+ def _check_dataset_format(self, with_index: bool):
+ if not isinstance(self.dataset, Dataset):
+ raise TypeError(f"Dataset should be a datasets.Dataset object, but got {type(self.dataset)}")
+ if len({"title", "text", "embeddings"} - set(self.dataset.column_names)) > 0:
+ raise ValueError(
+ "Dataset should be a dataset with the following columns: "
+ "title (str), text (str) and embeddings (arrays of dimension vector_size), "
+ f"but got columns {self.dataset.column_names}"
+ )
+ if with_index and "embeddings" not in self.dataset.list_indexes():
+ raise ValueError(
+ "Missing faiss index in the dataset. Make sure you called `dataset.add_faiss_index` to compute it "
+ "or `dataset.load_faiss_index` to load one from the disk."
+ )
+
+ def init_index(self):
+ raise NotImplementedError()
+
+ def is_initialized(self):
+ return self._index_initialized
+
+ def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]:
+ return [self.dataset[doc_ids[i].tolist()] for i in range(doc_ids.shape[0])]
+
+ def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]:
+ _, ids = self.dataset.search_batch("embeddings", question_hidden_states, n_docs)
+ docs = [self.dataset[[i for i in indices if i >= 0]] for indices in ids]
+ vectors = [doc["embeddings"] for doc in docs]
+ for i in range(len(vectors)):
+ if len(vectors[i]) < n_docs:
+ vectors[i] = np.vstack([vectors[i], np.zeros((n_docs - len(vectors[i]), self.vector_size))])
+ return np.array(ids), np.array(vectors) # shapes (batch_size, n_docs) and (batch_size, n_docs, d)
+
+
+class CanonicalHFIndex(HFIndexBase):
+ """
+ A wrapper around an instance of [`~datasets.Dataset`]. If `index_path` is set to `None`, we load the pre-computed
+ index available with the [`~datasets.arrow_dataset.Dataset`], otherwise, we load the index from the indicated path
+ on disk.
+
+ Args:
+ vector_size (`int`): the dimension of the passages embeddings used by the index
+ dataset_name (`str`, optional, defaults to `wiki_dpr`):
+ A dataset identifier of the indexed dataset on HuggingFace AWS bucket (list all available datasets and ids
+ with `datasets.list_datasets()`).
+ dataset_split (`str`, optional, defaults to `train`):
+ Which split of the `dataset` to load.
+ index_name (`str`, optional):
+ The index_name of the index associated with the `dataset`. The index loaded from `index_path` will be saved
+ under this name.
+ index_path (`str`, optional, defaults to `None`):
+ The path to the serialized faiss index on disk.
+ use_dummy_dataset (`bool`, optional, defaults to `False`):
+ If True, use the dummy configuration of the dataset for tests.
+ """
+
+ def __init__(
+ self,
+ vector_size: int,
+ dataset_name: str = "wiki_dpr",
+ dataset_split: str = "train",
+ index_name: Optional[str] = None,
+ index_path: Optional[str] = None,
+ use_dummy_dataset=False,
+ dataset_revision=None,
+ ):
+ if int(index_path is None) + int(index_name is None) != 1:
+ raise ValueError("Please provide exactly one of `index_name` or `index_path`.")
+ self.dataset_name = dataset_name
+ self.dataset_split = dataset_split
+ self.index_name = index_name
+ self.index_path = index_path
+ self.use_dummy_dataset = use_dummy_dataset
+ self.dataset_revision = dataset_revision
+ logger.info(f"Loading passages from {self.dataset_name}")
+ dataset = load_dataset(
+ self.dataset_name,
+ with_index=False,
+ split=self.dataset_split,
+ dummy=self.use_dummy_dataset,
+ revision=dataset_revision,
+ )
+ super().__init__(vector_size, dataset, index_initialized=False)
+
+ def init_index(self):
+ if self.index_path is not None:
+ logger.info(f"Loading index from {self.index_path}")
+ self.dataset.load_faiss_index("embeddings", file=self.index_path)
+ else:
+ logger.info(f"Loading index from {self.dataset_name} with index name {self.index_name}")
+ self.dataset = load_dataset(
+ self.dataset_name,
+ with_embeddings=True,
+ with_index=True,
+ split=self.dataset_split,
+ index_name=self.index_name,
+ dummy=self.use_dummy_dataset,
+ revision=self.dataset_revision,
+ )
+ self.dataset.set_format("numpy", columns=["embeddings"], output_all_columns=True)
+ self._index_initialized = True
+
+
+class CustomHFIndex(HFIndexBase):
+ """
+ A wrapper around an instance of [`~datasets.Dataset`]. The dataset and the index are both loaded from the
+ indicated paths on disk.
+
+ Args:
+ vector_size (`int`): the dimension of the passages embeddings used by the index
+ dataset_path (`str`):
+ The path to the serialized dataset on disk. The dataset should have 3 columns: title (str), text (str) and
+ embeddings (arrays of dimension vector_size)
+ index_path (`str`):
+ The path to the serialized faiss index on disk.
+ """
+
+ def __init__(self, vector_size: int, dataset, index_path=None):
+ super().__init__(vector_size, dataset, index_initialized=index_path is None)
+ self.index_path = index_path
+
+ @classmethod
+ def load_from_disk(cls, vector_size, dataset_path, index_path):
+ logger.info(f"Loading passages from {dataset_path}")
+ if dataset_path is None or index_path is None:
+ raise ValueError(
+ "Please provide `dataset_path` and `index_path` after calling `dataset.save_to_disk(dataset_path)` "
+ "and `dataset.get_index('embeddings').save(index_path)`."
+ )
+ dataset = load_from_disk(dataset_path)
+ return cls(vector_size=vector_size, dataset=dataset, index_path=index_path)
+
+ def init_index(self):
+ if not self.is_initialized():
+ logger.info(f"Loading index from {self.index_path}")
+ self.dataset.load_faiss_index("embeddings", file=self.index_path)
+ self._index_initialized = True
+
+
+class RagRetriever:
+ """
+ Retriever used to get documents from vector queries. It retrieves the document embeddings as well as the
+ document contents, and it formats them to be used with a RagModel.
+
+ Args:
+ config ([`RagConfig`]):
+ The configuration of the RAG model this Retriever is used with. Contains parameters indicating which
+ `Index` to build. You can load your own custom dataset with `config.index_name="custom"` or use a canonical
+ one (default) from the datasets library with `config.index_name="wiki_dpr"` for example.
+ question_encoder_tokenizer ([`PreTrainedTokenizer`]):
+ The tokenizer that was used to tokenize the question. It is used to decode the question so that it can be
+ re-encoded with the `generator_tokenizer`.
+ generator_tokenizer ([`PreTrainedTokenizer`]):
+ The tokenizer used for the generator part of the RagModel.
+ index ([`~models.rag.retrieval_rag.Index`], optional, defaults to the one defined by the configuration):
+ If specified, use this index instead of the one built using the configuration
+
+ Examples:
+
+ ```python
+ >>> # To load the default "wiki_dpr" dataset with 21M passages from wikipedia (index name is 'compressed' or 'exact')
+ >>> from transformers import RagRetriever
+
+ >>> retriever = RagRetriever.from_pretrained(
+ ... "facebook/dpr-ctx_encoder-single-nq-base", dataset="wiki_dpr", index_name="compressed"
+ ... )
+
+ >>> # To load your own indexed dataset built with the datasets library. More info on how to build the indexed dataset in examples/rag/use_own_knowledge_dataset.py
+ >>> from transformers import RagRetriever
+
+ >>> dataset = (
+ ... ...
+ ... ) # dataset must be a datasets.Dataset object with columns "title", "text" and "embeddings", and it must have a faiss index
+ >>> retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", indexed_dataset=dataset)
+
+ >>> # To load your own indexed dataset built with the datasets library that was saved on disk. More info in examples/rag/use_own_knowledge_dataset.py
+ >>> from transformers import RagRetriever
+
+ >>> dataset_path = "path/to/my/dataset" # dataset saved via *dataset.save_to_disk(...)*
+ >>> index_path = "path/to/my/index.faiss" # faiss index saved via *dataset.get_index("embeddings").save(...)*
+ >>> retriever = RagRetriever.from_pretrained(
+ ... "facebook/dpr-ctx_encoder-single-nq-base",
+ ... index_name="custom",
+ ... passages_path=dataset_path,
+ ... index_path=index_path,
+ ... )
+
+ >>> # To load the legacy index built originally for Rag's paper
+ >>> from transformers import RagRetriever
+
+ >>> retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", index_name="legacy")
+ ```"""
+
+ def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None, init_retrieval=True):
+ self._init_retrieval = init_retrieval
+ requires_backends(self, ["datasets", "faiss"])
+ super().__init__()
+ self.index = index or self._build_index(config)
+ self.generator_tokenizer = generator_tokenizer
+ self.question_encoder_tokenizer = question_encoder_tokenizer
+
+ self.n_docs = config.n_docs
+ self.batch_size = config.retrieval_batch_size
+
+ self.config = config
+ if self._init_retrieval:
+ self.init_retrieval()
+
+ self.ctx_encoder_tokenizer = None
+ self.return_tokenized_docs = False
+
+ @staticmethod
+ def _build_index(config):
+ if config.index_name == "legacy":
+ return LegacyIndex(
+ config.retrieval_vector_size,
+ config.index_path or LEGACY_INDEX_PATH,
+ )
+ elif config.index_name == "custom":
+ return CustomHFIndex.load_from_disk(
+ vector_size=config.retrieval_vector_size,
+ dataset_path=config.passages_path,
+ index_path=config.index_path,
+ )
+ else:
+ return CanonicalHFIndex(
+ vector_size=config.retrieval_vector_size,
+ dataset_name=config.dataset,
+ dataset_split=config.dataset_split,
+ index_name=config.index_name,
+ index_path=config.index_path,
+ use_dummy_dataset=config.use_dummy_dataset,
+ dataset_revision=config.dataset_revision,
+ )
+
+ @classmethod
+ def from_pretrained(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
+ requires_backends(cls, ["datasets", "faiss"])
+ config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
+ rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
+ question_encoder_tokenizer = rag_tokenizer.question_encoder
+ generator_tokenizer = rag_tokenizer.generator
+ if indexed_dataset is not None:
+ config.index_name = "custom"
+ index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
+ else:
+ index = cls._build_index(config)
+ return cls(
+ config,
+ question_encoder_tokenizer=question_encoder_tokenizer,
+ generator_tokenizer=generator_tokenizer,
+ index=index,
+ )
+
+ def save_pretrained(self, save_directory):
+ if isinstance(self.index, CustomHFIndex):
+ if self.config.index_path is None:
+ index_path = os.path.join(save_directory, "hf_dataset_index.faiss")
+ self.index.dataset.get_index("embeddings").save(index_path)
+ self.config.index_path = index_path
+ if self.config.passages_path is None:
+ passages_path = os.path.join(save_directory, "hf_dataset")
+ # datasets don't support save_to_disk with indexes right now
+ faiss_index = self.index.dataset._indexes.pop("embeddings")
+ self.index.dataset.save_to_disk(passages_path)
+ self.index.dataset._indexes["embeddings"] = faiss_index
+ self.config.passages_path = passages_path
+ self.config.save_pretrained(save_directory)
+ rag_tokenizer = RagTokenizer(
+ question_encoder=self.question_encoder_tokenizer,
+ generator=self.generator_tokenizer,
+ )
+ rag_tokenizer.save_pretrained(save_directory)
+
+ def init_retrieval(self):
+ """
+ Retriever initialization function. It loads the index into memory.
+ """
+
+ logger.info("initializing retrieval")
+ self.index.init_index()
+
+ def postprocess_docs(self, docs, input_strings, prefix, n_docs, return_tensors=None):
+ r"""
+ Postprocessing retrieved `docs` and combining them with `input_strings`.
+
+ Args:
+ docs (`dict`):
+ Retrieved documents.
+ input_strings (`str`):
+ Input strings decoded from the question `input_ids` by the question encoder tokenizer.
+ prefix (`str`):
+ Prefix added at the beginning of each input, typically used with T5-based models.
+
+ Return:
+ `tuple(tensors)`: a tuple consisting of two elements: contextualized `input_ids` and a compatible
+ `attention_mask`.
+ """
+
+ def cat_input_and_doc(doc_title, doc_text, input_string, prefix):
+ # TODO(Patrick): if we train more RAG models, I want to put the input first to take advantage of effortless truncation
+ # TODO(piktus): better handling of truncation
+ if doc_title.startswith('"'):
+ doc_title = doc_title[1:]
+ if doc_title.endswith('"'):
+ doc_title = doc_title[:-1]
+ if prefix is None:
+ prefix = ""
+ out = (prefix + doc_title + self.config.title_sep + doc_text + self.config.doc_sep + input_string).replace(
+ "  ", " "  # collapse the double spaces that the separators can introduce
+ )
+ return out
+
+ rag_input_strings = [
+ cat_input_and_doc(
+ docs[i]["title"][j],
+ docs[i]["text"][j],
+ input_strings[i],
+ prefix,
+ )
+ for i in range(len(docs))
+ for j in range(n_docs)
+ ]
+
+ contextualized_inputs = self.generator_tokenizer.batch_encode_plus(
+ rag_input_strings,
+ max_length=self.config.max_combined_length,
+ return_tensors=return_tensors,
+ padding="max_length",
+ truncation=True,
+ )
+
+ return contextualized_inputs["input_ids"], contextualized_inputs["attention_mask"]
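As a hedged, self-contained illustration of the string layout that `cat_input_and_doc` above builds (assuming RAG's default separators, `title_sep=" / "` and `doc_sep=" // "`; the title, passage, and question are made up):

```python
title = "Aristotle"
text = "Aristotle was a Greek philosopher and polymath."
question = "who was aristotle?"
prefix = ""  # T5-style generators would use a non-empty prefix here

# document title, document text, then the original question, joined by the configured separators
combined = (prefix + title + " / " + text + " // " + question).replace("  ", " ")
# -> "Aristotle / Aristotle was a Greek philosopher and polymath. // who was aristotle?"
```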
+
+ def _chunk_tensor(self, t: Iterable, chunk_size: int) -> List[Iterable]:
+ return [t[i : i + chunk_size] for i in range(0, len(t), chunk_size)]
+
+ def _main_retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, np.ndarray]:
+ question_hidden_states_batched = self._chunk_tensor(question_hidden_states, self.batch_size)
+ ids_batched = []
+ vectors_batched = []
+ for question_hidden_states in question_hidden_states_batched:
+ start_time = time.time()
+ ids, vectors = self.index.get_top_docs(question_hidden_states, n_docs)
+ logger.debug(
+ f"index search time: {time.time() - start_time} sec, batch size {question_hidden_states.shape}"
+ )
+ ids_batched.extend(ids)
+ vectors_batched.extend(vectors)
+ return (
+ np.array(ids_batched),
+ np.array(vectors_batched),
+ ) # shapes (batch_size, n_docs) and (batch_size, n_docs, d)
+
+ def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, np.ndarray, List[dict]]:
+ """
+ Retrieves documents for specified `question_hidden_states`.
+
+ Args:
+ question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`):
+ A batch of query vectors to retrieve with.
+ n_docs (`int`):
+ The number of docs retrieved per query.
+
+ Return:
+ `Tuple[np.ndarray, np.ndarray, List[dict]]`: A tuple with the following objects:
+
+ - **retrieved_doc_embeds** (`np.ndarray` of shape `(batch_size, n_docs, dim)`) -- The retrieval embeddings
+ of the retrieved docs per query.
+ - **doc_ids** (`np.ndarray` of shape `(batch_size, n_docs)`) -- The ids of the documents in the index.
+ - **doc_dicts** (`List[dict]`) -- The retrieved document examples (titles and texts) per query.
+ """
+
+ doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
+ return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
+
+ def set_ctx_encoder_tokenizer(self, ctx_encoder_tokenizer: PreTrainedTokenizer):
+ # used in end2end retriever training
+ self.ctx_encoder_tokenizer = ctx_encoder_tokenizer
+ self.return_tokenized_docs = True
+
+ def __call__(
+ self,
+ question_input_ids: List[List[int]],
+ question_hidden_states: np.ndarray,
+ prefix=None,
+ n_docs=None,
+ return_tensors=None,
+ ) -> BatchEncoding:
+ """
+ Retrieves documents for specified `question_hidden_states`.
+
+ Args:
+ question_input_ids (`List[List[int]]`): The batch of question input ids.
+ question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`):
+ A batch of query vectors to retrieve with.
+ prefix (`str`, *optional*):
+ The prefix used by the generator's tokenizer.
+ n_docs (`int`, *optional*):
+ The number of docs retrieved per query.
+ return_tensors (`str` or [`~utils.TensorType`], *optional*, defaults to "pt"):
+ If set, will return tensors instead of list of python integers. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return Numpy `np.ndarray` objects.
+
+ Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
+
+ - **context_input_ids** -- List of token ids to be fed to a model.
+
+ [What are input IDs?](../glossary#input-ids)
+
+ - **context_attention_mask** -- List of indices specifying which tokens should be attended to by the model
+ (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ - **retrieved_doc_embeds** -- List of embeddings of the retrieved documents
+ - **doc_ids** -- List of ids of the retrieved documents
+ """
+
+ n_docs = n_docs if n_docs is not None else self.n_docs
+ prefix = prefix if prefix is not None else self.config.generator.prefix
+ retrieved_doc_embeds, doc_ids, docs = self.retrieve(question_hidden_states, n_docs)
+
+ input_strings = self.question_encoder_tokenizer.batch_decode(question_input_ids, skip_special_tokens=True)
+ context_input_ids, context_attention_mask = self.postprocess_docs(
+ docs, input_strings, prefix, n_docs, return_tensors=return_tensors
+ )
+
+ if self.return_tokenized_docs:
+ retrieved_doc_text = []
+ retrieved_doc_title = []
+
+ for b_idx in range(len(docs)):
+ for doc_idx in range(n_docs):
+ retrieved_doc_text.append(docs[b_idx]["text"][doc_idx])
+ retrieved_doc_title.append(docs[b_idx]["title"][doc_idx])
+
+ tokenized_docs = self.ctx_encoder_tokenizer(
+ retrieved_doc_title,
+ retrieved_doc_text,
+ truncation=True,
+ padding="longest",
+ return_tensors=return_tensors,
+ )
+
+ return BatchEncoding(
+ {
+ "context_input_ids": context_input_ids,
+ "context_attention_mask": context_attention_mask,
+ "retrieved_doc_embeds": retrieved_doc_embeds,
+ "doc_ids": doc_ids,
+ "tokenized_doc_ids": tokenized_docs["input_ids"],
+ "tokenized_doc_attention_mask": tokenized_docs["attention_mask"],
+ },
+ tensor_type=return_tensors,
+ )
+
+ else:
+ return BatchEncoding(
+ {
+ "context_input_ids": context_input_ids,
+ "context_attention_mask": context_attention_mask,
+ "retrieved_doc_embeds": retrieved_doc_embeds,
+ "doc_ids": doc_ids,
+ },
+ tensor_type=return_tensors,
+ )
+
+
+__all__ = ["RagRetriever"]
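A hedged end-to-end sketch of how `RagRetriever.__call__` above is typically driven; the checkpoints and question text are illustrative only.

```python
from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizer, RagRetriever

tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
retriever = RagRetriever.from_pretrained(
    "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
)

inputs = tokenizer("what does a rag retriever return", return_tensors="pt")
question_hidden_states = encoder(**inputs).pooler_output  # (batch_size, vector_size)

# Retrieve n_docs passages per query and build generator-ready, document-contextualized inputs.
out = retriever(inputs["input_ids"], question_hidden_states.detach().numpy(), return_tensors="pt")
print(out["context_input_ids"].shape)  # (batch_size * n_docs, max_combined_length)
print(out["doc_ids"].shape)            # (batch_size, n_docs)
```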
diff --git a/janus/lib/python3.10/site-packages/transformers/models/sam/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/sam/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d3fcb8d3982f05d5de8901e09ce6c49717c11dcc
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/sam/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/sam/__pycache__/modeling_sam.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/sam/__pycache__/modeling_sam.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4c88977e678d4683af0ae52c8341541972b02ece
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/sam/__pycache__/modeling_sam.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/sam/__pycache__/modeling_tf_sam.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/sam/__pycache__/modeling_tf_sam.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9603c5721181763e747725a48ef882e2fbca1973
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/sam/__pycache__/modeling_tf_sam.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/starcoder2/__init__.py b/janus/lib/python3.10/site-packages/transformers/models/starcoder2/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6349255ed3a4756aa7d449ff6af35847886dd136
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/starcoder2/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+ from .configuration_starcoder2 import *
+ from .modeling_starcoder2 import *
+else:
+ import sys
+
+ _file = globals()["__file__"]
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/janus/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..59f733e34e25e093a5298c7e37436d6017646d31
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/configuration_starcoder2.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/configuration_starcoder2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b125335f8044d13bf390d6f84b57c494a102e4e
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/configuration_starcoder2.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/modeling_starcoder2.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/modeling_starcoder2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c4b51afab2a992fb6e59c3709cb9a69e260d80e4
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/modeling_starcoder2.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/modular_starcoder2.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/modular_starcoder2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fd779cd8210a6c92bb76d9e24c5691683de98ac4
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/modular_starcoder2.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/starcoder2/configuration_starcoder2.py b/janus/lib/python3.10/site-packages/transformers/models/starcoder2/configuration_starcoder2.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f21d1f12d8b22f1d01360dac289b1843a89b098
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/starcoder2/configuration_starcoder2.py
@@ -0,0 +1,202 @@
+# coding=utf-8
+# Copyright 2024 BigCode and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Starcoder2 model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...modeling_rope_utils import rope_config_validation
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class Starcoder2Config(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`Starcoder2Model`]. It is used to instantiate a
+ Starcoder2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the [bigcode/starcoder2-7b](https://huggingface.co/bigcode/starcoder2-7b) model.
+
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 49152):
+ Vocabulary size of the Starcoder2 model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`Starcoder2Model`]
+ hidden_size (`int`, *optional*, defaults to 3072):
+ Dimension of the hidden representations.
+ intermediate_size (`int`, *optional*, defaults to 12288):
+ Dimension of the MLP representations.
+ num_hidden_layers (`int`, *optional*, defaults to 30):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 24):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ num_key_value_heads (`int`, *optional*, defaults to 2):
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+ `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+ by meanpooling all the original heads within that group. For more details check out [this
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `2`.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
+ The non-linear activation function (function or string) in the decoder.
+ max_position_embeddings (`int`, *optional*, defaults to 4096):
+ The maximum sequence length that this model might ever be used with. Starcoder2's sliding window attention
+ allows sequences of up to 4096*32 tokens.
+ initializer_range (`float`, *optional*, defaults to 0.018042):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ norm_epsilon (`float`, *optional*, defaults to 1e-05):
+ Epsilon value for the layer norm
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ bos_token_id (`int`, *optional*, defaults to 50256):
+ The id of the "beginning-of-sequence" token.
+ eos_token_id (`int`, *optional*, defaults to 50256):
+ The id of the "end-of-sequence" token.
+ rope_theta (`float`, *optional*, defaults to 10000.0):
+ The base period of the RoPE embeddings.
+ rope_scaling (`Dict`, *optional*):
+ Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
+ and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
+ accordingly.
+ Expected contents:
+ `rope_type` (`str`):
+ The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
+ 'llama3'], with 'default' being the original RoPE implementation.
+ `factor` (`float`, *optional*):
+ Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
+ most scaling types, a `factor` of x will enable the model to handle sequences of length x *
+ original maximum pre-trained length.
+ `original_max_position_embeddings` (`int`, *optional*):
+ Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
+ pretraining.
+ `attention_factor` (`float`, *optional*):
+ Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
+ computation. If unspecified, it defaults to value recommended by the implementation, using the
+ `factor` field to infer the suggested value.
+ `beta_fast` (`float`, *optional*):
+ Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
+ ramp function. If unspecified, it defaults to 32.
+ `beta_slow` (`float`, *optional*):
+ Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
+ ramp function. If unspecified, it defaults to 1.
+ `short_factor` (`List[float]`, *optional*):
+ Only used with 'longrope'. The scaling factor to be applied to short contexts (<
+ `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
+ size divided by the number of attention heads divided by 2
+ `long_factor` (`List[float]`, *optional*):
+ Only used with 'longrope'. The scaling factor to be applied to long contexts (>
+ `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
+ size divided by the number of attention heads divided by 2
+ `low_freq_factor` (`float`, *optional*):
+ Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
+ `high_freq_factor` (`float`, *optional*):
+ Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
+ sliding_window (`int`, *optional*):
+ Sliding window attention window size. If not specified, will default to `None` (no sliding window).
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ residual_dropout (`float`, *optional*, defaults to 0.0):
+ Residual connection dropout value.
+ embedding_dropout (`float`, *optional*, defaults to 0.0):
+ Embedding dropout.
+ use_bias (`bool`, *optional*, defaults to `True`):
+ Whether to use bias term on linear layers of the model.
+
+
+ ```python
+ >>> from transformers import Starcoder2Model, Starcoder2Config
+
+ >>> # Initializing a Starcoder2 7B style configuration
+ >>> configuration = Starcoder2Config()
+
+ >>> # Initializing a model from the Starcoder2 7B style configuration
+ >>> model = Starcoder2Model(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "starcoder2"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ # Default tensor parallel plan for base model `Starcoder2`
+ base_model_tp_plan = {
+ "layers.*.self_attn.q_proj": "colwise",
+ "layers.*.self_attn.k_proj": "colwise",
+ "layers.*.self_attn.v_proj": "colwise",
+ "layers.*.self_attn.o_proj": "rowwise",
+ "layers.*.mlp.c_fc": "colwise",
+ "layers.*.mlp.c_proj": "colwise",
+ }
+
+ def __init__(
+ self,
+ vocab_size=49152,
+ hidden_size=3072,
+ intermediate_size=12288,
+ num_hidden_layers=30,
+ num_attention_heads=24,
+ num_key_value_heads=2,
+ hidden_act="gelu_pytorch_tanh",
+ max_position_embeddings=4096,
+ initializer_range=0.018042,
+ norm_epsilon=1e-5,
+ use_cache=True,
+ bos_token_id=50256,
+ eos_token_id=50256,
+ rope_theta=10000.0,
+ rope_scaling=None,
+ sliding_window=None,
+ attention_dropout=0.0,
+ residual_dropout=0.0,
+ embedding_dropout=0.0,
+ use_bias=True,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.sliding_window = sliding_window
+ self.use_bias = use_bias
+ self.num_key_value_heads = num_key_value_heads
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.norm_epsilon = norm_epsilon
+ self.use_cache = use_cache
+ self.rope_theta = rope_theta
+ self.rope_scaling = rope_scaling
+ self.attention_dropout = attention_dropout
+ self.residual_dropout = residual_dropout
+ self.embedding_dropout = embedding_dropout
+ # Validate the correctness of rotary position embeddings parameters
+ # BC: if there is a 'type' field, move it to 'rope_type'.
+ if self.rope_scaling is not None and "type" in self.rope_scaling:
+ self.rope_scaling["rope_type"] = self.rope_scaling["type"]
+ rope_config_validation(self)
+
+ super().__init__(
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ **kwargs,
+ )
+
+
+__all__ = ["Starcoder2Config"]
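A hedged configuration sketch for the class above; the sizes below are deliberately small and are not the settings of any released Starcoder2 checkpoint.

```python
from transformers import Starcoder2Config, Starcoder2Model

config = Starcoder2Config(
    hidden_size=1024,
    intermediate_size=4096,
    num_hidden_layers=8,
    num_attention_heads=16,
    num_key_value_heads=4,                                  # grouped-query attention (GQA)
    sliding_window=4096,                                    # enable sliding-window attention
    rope_scaling={"rope_type": "dynamic", "factor": 2.0},   # checked by rope_config_validation
)
model = Starcoder2Model(config)
print(sum(p.numel() for p in model.parameters()))  # rough parameter count of the toy model
```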
diff --git a/janus/lib/python3.10/site-packages/transformers/models/starcoder2/modeling_starcoder2.py b/janus/lib/python3.10/site-packages/transformers/models/starcoder2/modeling_starcoder2.py
new file mode 100644
index 0000000000000000000000000000000000000000..3668c076d24c69659e5dfde5b7f315f5438fd8f4
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/starcoder2/modeling_starcoder2.py
@@ -0,0 +1,1060 @@
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# This file was automatically generated from src/transformers/models/starcoder2/modular_starcoder2.py.
+# Do NOT edit this file manually as any edits will be overwritten by the generation of
+# the file from the modular. If any change should be done, please apply the change to the
+# modular_starcoder2.py file directly. One of our CI enforces this.
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# coding=utf-8
+# Copyright 2024 BigCode and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Callable, List, Optional, Tuple, Union
+
+import torch
+from torch import nn
+
+from ...activations import ACT2FN
+from ...cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache
+from ...generation import GenerationMixin
+from ...modeling_attn_mask_utils import AttentionMaskConverter
+from ...modeling_flash_attention_utils import FlashAttentionKwargs
+from ...modeling_outputs import (
+ BaseModelOutputWithPast,
+ CausalLMOutputWithPast,
+ SequenceClassifierOutputWithPast,
+ TokenClassifierOutput,
+)
+from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS
+from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
+from ...processing_utils import Unpack
+from ...utils import (
+ LossKwargs,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_starcoder2 import Starcoder2Config
+
+
+logger = logging.get_logger(__name__)
+_CHECKPOINT_FOR_DOC = "bigcode/starcoder2-7b"
+_CONFIG_FOR_DOC = "Starcoder2Config"
+
+
+class Starcoder2MLP(nn.Module):
+ def __init__(self, config: Starcoder2Config):
+ super().__init__()
+ embed_dim = config.hidden_size
+ self.c_fc = nn.Linear(embed_dim, config.intermediate_size, bias=config.use_bias)
+ self.c_proj = nn.Linear(config.intermediate_size, embed_dim, bias=config.use_bias)
+ self.act = ACT2FN[config.hidden_act]
+ self.residual_dropout = config.residual_dropout
+
+ def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
+ hidden_states = self.c_fc(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.c_proj(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.residual_dropout, training=self.training)
+ return hidden_states
+
+
+def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
+ x1 = x[..., : x.shape[-1] // 2]
+ x2 = x[..., x.shape[-1] // 2 :]
+ return torch.cat((-x2, x1), dim=-1)
+
+
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding to the query and key tensors.
+
+ Args:
+ q (`torch.Tensor`): The query tensor.
+ k (`torch.Tensor`): The key tensor.
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
+ position_ids (`torch.Tensor`, *optional*):
+ Deprecated and unused.
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+ Returns:
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+ """
+ cos = cos.unsqueeze(unsqueeze_dim)
+ sin = sin.unsqueeze(unsqueeze_dim)
+ q_embed = (q * cos) + (rotate_half(q) * sin)
+ k_embed = (k * cos) + (rotate_half(k) * sin)
+ return q_embed, k_embed
+
+
+def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+ """
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+ """
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+ if n_rep == 1:
+ return hidden_states
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
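A small, hedged sanity check of the `repeat_kv` helper above (the shapes are arbitrary): expanding the key/value heads this way matches `torch.repeat_interleave` along the head dimension.

```python
import torch

from transformers.models.starcoder2.modeling_starcoder2 import repeat_kv

kv = torch.randn(2, 4, 16, 64)      # (batch, num_key_value_heads, seq_len, head_dim)
expanded = repeat_kv(kv, n_rep=6)   # -> (batch, num_key_value_heads * n_rep, seq_len, head_dim)

assert expanded.shape == (2, 24, 16, 64)
assert torch.equal(expanded, torch.repeat_interleave(kv, repeats=6, dim=1))
```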
+
+
+def eager_attention_forward(
+ module: nn.Module,
+ query: torch.Tensor,
+ key: torch.Tensor,
+ value: torch.Tensor,
+ attention_mask: Optional[torch.Tensor],
+ scaling: float,
+ dropout: float = 0.0,
+ **kwargs,
+):
+ key_states = repeat_kv(key, module.num_key_value_groups)
+ value_states = repeat_kv(value, module.num_key_value_groups)
+
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
+ if attention_mask is not None:
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
+ attn_weights = attn_weights + causal_mask
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
+ attn_output = torch.matmul(attn_weights, value_states)
+ attn_output = attn_output.transpose(1, 2).contiguous()
+
+ return attn_output, attn_weights
+
+
+class Starcoder2Attention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config: Starcoder2Config, layer_idx: Optional[int] = None):
+ super().__init__()
+ self.config = config
+ self.layer_idx = layer_idx
+ self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
+ self.scaling = self.head_dim**-0.5
+ self.attention_dropout = config.attention_dropout
+ self.is_causal = True
+ self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.use_bias)
+ self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
+ self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
+ self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.use_bias)
+ self.residual_dropout = config.residual_dropout
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ position_embeddings: Tuple[torch.Tensor, torch.Tensor],
+ attention_mask: Optional[torch.Tensor],
+ past_key_value: Optional[Cache] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs: Unpack[FlashAttentionKwargs],
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ input_shape = hidden_states.shape[:-1]
+ hidden_shape = (*input_shape, -1, self.head_dim)
+
+ query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+ key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+ value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+
+ cos, sin = position_embeddings
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+ if past_key_value is not None:
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ attention_interface: Callable = eager_attention_forward
+ if self.config._attn_implementation != "eager":
+ if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
+ logger.warning_once(
+ "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
+ 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+ )
+ else:
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+ attn_output, attn_weights = attention_interface(
+ self,
+ query_states,
+ key_states,
+ value_states,
+ attention_mask,
+ dropout=0.0 if not self.training else self.attention_dropout,
+ scaling=self.scaling,
+ sliding_window=getattr(self.config, "sliding_window", None), # diff with Llama
+ **kwargs,
+ )
+
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
+ attn_output = self.o_proj(attn_output)
+ attn_output = nn.functional.dropout(
+ attn_output, p=self.residual_dropout, training=self.training
+ ) # diff with Llama
+
+ return attn_output, attn_weights
+
+
+class Starcoder2DecoderLayer(nn.Module):
+ def __init__(self, config: Starcoder2Config, layer_idx: int):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+ self.self_attn = Starcoder2Attention(config=config, layer_idx=layer_idx)
+ self.mlp = Starcoder2MLP(config)
+ self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
+ self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
+ **kwargs: Unpack[FlashAttentionKwargs],
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ residual = hidden_states
+
+ hidden_states = self.input_layernorm(hidden_states)
+
+ # Self Attention
+ hidden_states, self_attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ position_embeddings=position_embeddings,
+ **kwargs,
+ )
+ hidden_states = residual + hidden_states
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.post_attention_layernorm(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ return outputs
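+
+ # NOTE: a commented-out summary of the pre-norm residual structure implemented above,
+ # shown as pseudocode only:
+ #
+ # >>> # hidden = hidden + SelfAttention(input_layernorm(hidden))
+ # >>> # hidden = hidden + MLP(post_attention_layernorm(hidden))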
+
+
+class Starcoder2RotaryEmbedding(nn.Module):
+ def __init__(self, config: Starcoder2Config, device=None):
+ super().__init__()
+ # BC: "rope_type" was originally "type"
+ if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
+ else:
+ self.rope_type = "default"
+ self.max_seq_len_cached = config.max_position_embeddings
+ self.original_max_seq_len = config.max_position_embeddings
+
+ self.config = config
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
+
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+ self.original_inv_freq = self.inv_freq
+
+ def _dynamic_frequency_update(self, position_ids, device):
+ """
+ dynamic RoPE layers should recompute `inv_freq` in the following situations:
+ 1 - growing beyond the cached sequence length (allow scaling)
+ 2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
+ """
+ seq_len = torch.max(position_ids) + 1
+ if seq_len > self.max_seq_len_cached: # growth
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
+ self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation
+ self.max_seq_len_cached = seq_len
+
+ if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset
+ self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
+ self.max_seq_len_cached = self.original_max_seq_len
+
+ @torch.no_grad()
+ def forward(self, x, position_ids):
+ if "dynamic" in self.rope_type:
+ self._dynamic_frequency_update(position_ids, device=x.device)
+
+ # Core RoPE block
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
+ position_ids_expanded = position_ids[:, None, :].float()
+ # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
+ device_type = x.device.type
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
+ with torch.autocast(device_type=device_type, enabled=False):
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+ emb = torch.cat((freqs, freqs), dim=-1)
+ cos = emb.cos()
+ sin = emb.sin()
+
+ # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
+ cos = cos * self.attention_scaling
+ sin = sin * self.attention_scaling
+
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
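+
+ # NOTE: a commented-out sketch of the tensors produced above, with hypothetical sizes.
+ #
+ # >>> # x: (batch=2, seq=5, hidden_size), position_ids: (2, 5), inv_freq: (head_dim // 2,)
+ # >>> # freqs: (2, 5, head_dim // 2) after the matmul and transpose; cos/sin: (2, 5, head_dim)
+ # >>> # after the concatenation. They are computed once per forward pass and shared by every
+ # >>> # decoder layer through `apply_rotary_pos_emb`.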
+
+
+STARCODER2_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`Starcoder2Config`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+ "The bare Starcoder2 Model outputting raw hidden-states without any specific head on top.",
+ STARCODER2_START_DOCSTRING,
+)
+class Starcoder2PreTrainedModel(PreTrainedModel):
+ config_class = Starcoder2Config
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["Starcoder2DecoderLayer"]
+ _skip_keys_device_placement = ["past_key_values"]
+ _supports_flash_attn_2 = True
+ _supports_sdpa = True
+ _supports_flex_attn = True
+ _supports_cache_class = True
+ _supports_quantized_cache = True
+ _supports_static_cache = True
+
+ def _init_weights(self, module):
+ std = self.config.initializer_range
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+
+STARCODER2_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
+ `past_key_values`).
+
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+ information on the default strategy.
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.n_positions - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
+
+ Two formats are allowed:
+ - a [`~cache_utils.Cache`] instance, see our
+ [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
+ cache format.
+
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
+ legacy cache format will be returned.
+
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
+ of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
+ the complete sequence length.
+"""
+
+
+@add_start_docstrings(
+ "The bare Starcoder2 Model outputting raw hidden-states without any specific head on top.",
+ STARCODER2_START_DOCSTRING,
+)
+class Starcoder2Model(Starcoder2PreTrainedModel):
+ """
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Starcoder2DecoderLayer`]
+
+ Args:
+ config: Starcoder2Config
+ """
+
+ def __init__(self, config: Starcoder2Config):
+ super().__init__(config)
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+ self.layers = nn.ModuleList(
+ [Starcoder2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+ )
+ self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
+ self.rotary_emb = Starcoder2RotaryEmbedding(config=config)
+ self.gradient_checkpointing = False
+ self.embedding_dropout = config.embedding_dropout
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(STARCODER2_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if (input_ids is None) ^ (inputs_embeds is not None):
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
+
+ if self.gradient_checkpointing and self.training and use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
+ )
+ use_cache = False
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ if use_cache and past_key_values is None:
+ past_key_values = DynamicCache()
+
+ if cache_position is None:
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+ cache_position = torch.arange(
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+ )
+
+ if position_ids is None:
+ position_ids = cache_position.unsqueeze(0)
+
+ causal_mask = self._update_causal_mask(
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
+ )
+
+ hidden_states = inputs_embeds
+ hidden_states = nn.functional.dropout(
+ hidden_states, p=self.embedding_dropout, training=self.training
+ ) # main diff with Llama
+
+ # create position embeddings to be shared across the decoder layers
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=causal_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_values,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ position_embeddings=position_embeddings,
+ **flash_attn_kwargs,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ hidden_states = self.norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ output = BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=past_key_values if use_cache else None,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ )
+ return output if return_dict else output.to_tuple()
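+
+ # NOTE: a commented-out, minimal usage sketch of the bare model defined here. The tiny
+ # config values are hypothetical and only chosen so the example is cheap to run.
+ #
+ # >>> import torch
+ # >>> from transformers import Starcoder2Config, Starcoder2Model
+ # >>> config = Starcoder2Config(vocab_size=128, hidden_size=64, intermediate_size=128,
+ # ...                           num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=2)
+ # >>> model = Starcoder2Model(config)
+ # >>> input_ids = torch.randint(0, 128, (1, 8))
+ # >>> outputs = model(input_ids=input_ids)
+ # >>> outputs.last_hidden_state.shape
+ # torch.Size([1, 8, 64])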
+
+ def _update_causal_mask(
+ self,
+ attention_mask: torch.Tensor,
+ input_tensor: torch.Tensor,
+ cache_position: torch.Tensor,
+ past_key_values: Cache,
+ output_attentions: bool,
+ ):
+ if self.config._attn_implementation == "flash_attention_2":
+ if attention_mask is not None and past_key_values is not None:
+ is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0]
+ if is_padding_right:
+ raise ValueError(
+ "You are attempting to perform batched generation with padding_side='right'"
+ " this may lead to unexpected behaviour for Flash Attention version of Starcoder2. Make sure to "
+ " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
+ )
+ if attention_mask is not None and 0.0 in attention_mask:
+ return attention_mask
+ return None
+
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
+ # to infer the attention mask.
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+ using_static_cache = isinstance(past_key_values, StaticCache)
+ using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache)
+
+ # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
+ if (
+ self.config._attn_implementation == "sdpa"
+ and not (using_static_cache or using_sliding_window_cache)
+ and not output_attentions
+ ):
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
+ attention_mask,
+ inputs_embeds=input_tensor,
+ past_key_values_length=past_seen_tokens,
+ sliding_window=self.config.sliding_window,
+ is_training=self.training,
+ ):
+ return None
+
+ dtype, device = input_tensor.dtype, input_tensor.device
+ min_dtype = torch.finfo(dtype).min
+ sequence_length = input_tensor.shape[1]
+ # SlidingWindowCache or StaticCache
+ if using_sliding_window_cache or using_static_cache:
+ target_length = past_key_values.get_max_cache_shape()
+ # DynamicCache or no cache
+ else:
+ target_length = (
+ attention_mask.shape[-1]
+ if isinstance(attention_mask, torch.Tensor)
+ else past_seen_tokens + sequence_length + 1
+ )
+
+ # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
+ causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
+ attention_mask,
+ sequence_length=sequence_length,
+ target_length=target_length,
+ dtype=dtype,
+ device=device,
+ cache_position=cache_position,
+ batch_size=input_tensor.shape[0],
+ config=self.config,
+ past_key_values=past_key_values,
+ )
+
+ if (
+ self.config._attn_implementation == "sdpa"
+ and attention_mask is not None
+ and attention_mask.device.type == "cuda"
+ and not output_attentions
+ ):
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
+ # Details: https://github.com/pytorch/pytorch/issues/110213
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
+
+ return causal_mask
+
+ @staticmethod
+ def _prepare_4d_causal_attention_mask_with_cache_position(
+ attention_mask: torch.Tensor,
+ sequence_length: int,
+ target_length: int,
+ dtype: torch.dtype,
+ device: torch.device,
+ cache_position: torch.Tensor,
+ batch_size: int,
+ config: Starcoder2Config,
+ past_key_values: Cache,
+ ):
+ """
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
+
+ Args:
+ attention_mask (`torch.Tensor`):
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
+ sequence_length (`int`):
+ The sequence length being processed.
+ target_length (`int`):
+ The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
+ dtype (`torch.dtype`):
+ The dtype to use for the 4D attention mask.
+ device (`torch.device`):
+ The device to place the 4D attention mask on.
+ cache_position (`torch.Tensor`):
+ Indices depicting the position of the input sequence tokens in the sequence.
+ batch_size (`int`):
+ Batch size.
+ config (`Starcoder2Config`):
+ The model's configuration class
+ past_key_values (`Cache`):
+ The cache class that is being used currently to generate
+ """
+ if attention_mask is not None and attention_mask.dim() == 4:
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
+ causal_mask = attention_mask
+ else:
+ min_dtype = torch.finfo(dtype).min
+ causal_mask = torch.full(
+ (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
+ )
+ diagonal_attend_mask = torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
+ if config.sliding_window is not None:
+ # if we have a sliding window, we should not attend to tokens beyond the sliding window length, so we also mask them out
+ # the check is needed to verify if the current checkpoint was trained with a sliding window or not
+ if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length:
+ sliding_attend_mask = torch.arange(target_length, device=device) <= (
+ cache_position.reshape(-1, 1) - config.sliding_window
+ )
+ diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
+ causal_mask *= diagonal_attend_mask
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
+ if attention_mask is not None:
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
+ if attention_mask.shape[-1] > target_length:
+ attention_mask = attention_mask[:, :target_length]
+ mask_length = attention_mask.shape[-1]
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
+ padding_mask = padding_mask == 0
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
+ padding_mask, min_dtype
+ )
+ return causal_mask
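+
+ # NOTE: a commented-out sketch of what the helper above produces for a tiny, hypothetical
+ # case (no padding mask, no sliding window).
+ #
+ # >>> # sequence_length=3, target_length=3, cache_position=[0, 1, 2], batch_size=1
+ # >>> # causal_mask[0, 0] is a (3, 3) matrix that is 0.0 where a query may attend and
+ # >>> # torch.finfo(dtype).min above the diagonal:
+ # >>> # [[0, min, min],
+ # >>> #  [0,   0, min],
+ # >>> #  [0,   0,   0]]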
+
+
+class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
+
+
+class Starcoder2ForCausalLM(Starcoder2PreTrainedModel, GenerationMixin):
+ _tied_weights_keys = ["lm_head.weight"]
+ _tp_plan = {"lm_head": "colwise_rep"}
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.model = Starcoder2Model(config)
+ self.vocab_size = config.vocab_size
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.model = decoder
+
+ def get_decoder(self):
+ return self.model
+
+ @add_start_docstrings_to_model_forward(STARCODER2_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ num_logits_to_keep: int = 0,
+ **kwargs: Unpack[KwargsForCausalLM],
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
+ r"""
+ Args:
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ num_logits_to_keep (`int`, *optional*):
+ Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
+ `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
+ token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, Starcoder2ForCausalLM
+
+ >>> model = Starcoder2ForCausalLM.from_pretrained("bigcode/starcoder2-7b")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bigcode/starcoder2-7b")
+
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+ >>> # Generate
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ cache_position=cache_position,
+ **kwargs,
+ )
+
+ hidden_states = outputs[0]
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+ logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])
+
+ loss = None
+ if labels is not None:
+ loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return CausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ The Starcoder2 Model transformer with a sequence classification head on top (linear layer).
+
+ [`Starcoder2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
+ (e.g. GPT-2) do.
+
+ Since it does classification on the last token, it needs to know the position of the last token. If a
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each
+ row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it
+ cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same
+ (takes the last value in each row of the batch).
+ """,
+ STARCODER2_START_DOCSTRING,
+)
+class Starcoder2ForSequenceClassification(Starcoder2PreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.model = Starcoder2Model(config)
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(STARCODER2_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+
+ if input_ids is not None:
+ batch_size = input_ids.shape[0]
+ else:
+ batch_size = inputs_embeds.shape[0]
+
+ if self.config.pad_token_id is None and batch_size != 1:
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
+ sequence_lengths = sequence_lengths.to(logits.device)
+ else:
+ sequence_lengths = -1
+
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
+
+ loss = None
+ if labels is not None:
+ loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)
+
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=pooled_logits,
+ past_key_values=transformer_outputs.past_key_values,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
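+
+ # NOTE: a commented-out sketch of the last-token pooling above; the pad_token_id and
+ # input values are hypothetical.
+ #
+ # >>> # with pad_token_id=0 and input_ids = [[5, 7, 0, 0]], the first pad sits at index 2,
+ # >>> # so sequence_lengths = 1 and pooled_logits = logits[:, 1] (the last non-padding token).
+ # >>> # without a pad_token_id, the batch size must be 1 and the last position is used.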
+
+
+@add_start_docstrings(
+ """
+ The Starcoder2 Model transformer with a token classification head on top (a linear layer on top of the hidden-states
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
+ """,
+ STARCODER2_START_DOCSTRING,
+)
+class Starcoder2ForTokenClassification(Starcoder2PreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.model = Starcoder2Model(config)
+ if getattr(config, "classifier_dropout", None) is not None:
+ classifier_dropout = config.classifier_dropout
+ elif getattr(config, "hidden_dropout", None) is not None:
+ classifier_dropout = config.hidden_dropout
+ else:
+ classifier_dropout = 0.1
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.score = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(STARCODER2_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = outputs[0]
+ sequence_output = self.dropout(sequence_output)
+ logits = self.score(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss = self.loss_function(logits, labels, self.config)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
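+
+ # NOTE: a commented-out shape sketch of the token-classification head above, with
+ # hypothetical sizes.
+ #
+ # >>> # hidden states (batch, seq, hidden_size) -> dropout -> score -> logits (batch, seq, num_labels)
+ # >>> # when labels of shape (batch, seq) are given, the loss is a per-token cross-entropy.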
+
+
+__all__ = [
+ "Starcoder2ForCausalLM",
+ "Starcoder2Model",
+ "Starcoder2PreTrainedModel",
+ "Starcoder2ForSequenceClassification",
+ "Starcoder2ForTokenClassification",
+]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/starcoder2/modular_starcoder2.py b/janus/lib/python3.10/site-packages/transformers/models/starcoder2/modular_starcoder2.py
new file mode 100644
index 0000000000000000000000000000000000000000..32d64cd167ba50378b682323785e7cc30e478ae3
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/starcoder2/modular_starcoder2.py
@@ -0,0 +1,274 @@
+# coding=utf-8
+# Copyright 2024 BigCode and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch Starcoder2 model."""
+
+from typing import Callable, List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ...activations import ACT2FN
+from ...cache_utils import Cache, DynamicCache
+from ...modeling_flash_attention_utils import FlashAttentionKwargs
+from ...modeling_outputs import (
+ BaseModelOutputWithPast,
+)
+from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
+from ...processing_utils import Unpack
+from ...utils import add_start_docstrings_to_model_forward, logging
+from ..mistral.modeling_mistral import (
+ MistralAttention,
+ MistralDecoderLayer,
+ MistralForCausalLM,
+ MistralForSequenceClassification,
+ MistralForTokenClassification,
+ MistralModel,
+ apply_rotary_pos_emb,
+ eager_attention_forward,
+)
+from .configuration_starcoder2 import Starcoder2Config
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "Starcoder2Config"
+_CHECKPOINT_FOR_DOC = "bigcode/starcoder2-7b"
+
+
+class Starcoder2MLP(nn.Module):
+ def __init__(self, config: Starcoder2Config):
+ super().__init__()
+ embed_dim = config.hidden_size
+ self.c_fc = nn.Linear(embed_dim, config.intermediate_size, bias=config.use_bias)
+ self.c_proj = nn.Linear(config.intermediate_size, embed_dim, bias=config.use_bias)
+ self.act = ACT2FN[config.hidden_act]
+ self.residual_dropout = config.residual_dropout
+
+ def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
+ hidden_states = self.c_fc(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.c_proj(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.residual_dropout, training=self.training)
+ return hidden_states
+
+
+class Starcoder2Attention(MistralAttention):
+ def __init__(self, config: Starcoder2Config, layer_idx: Optional[int] = None):
+ super().__init__(config, layer_idx)
+ self.residual_dropout = config.residual_dropout
+ self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.use_bias)
+ self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
+ self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
+ self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.use_bias)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ position_embeddings: Tuple[torch.Tensor, torch.Tensor],
+ attention_mask: Optional[torch.Tensor],
+ past_key_value: Optional[Cache] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs: Unpack[FlashAttentionKwargs],
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ input_shape = hidden_states.shape[:-1]
+ hidden_shape = (*input_shape, -1, self.head_dim)
+
+ query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+ key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+ value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+
+ cos, sin = position_embeddings
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+ if past_key_value is not None:
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ attention_interface: Callable = eager_attention_forward
+ if self.config._attn_implementation != "eager":
+ if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
+ logger.warning_once(
+ "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
+ 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+ )
+ else:
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+ attn_output, attn_weights = attention_interface(
+ self,
+ query_states,
+ key_states,
+ value_states,
+ attention_mask,
+ dropout=0.0 if not self.training else self.attention_dropout,
+ scaling=self.scaling,
+ sliding_window=getattr(self.config, "sliding_window", None), # diff with Llama
+ **kwargs,
+ )
+
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
+ attn_output = self.o_proj(attn_output)
+ attn_output = nn.functional.dropout(
+ attn_output, p=self.residual_dropout, training=self.training
+ ) # diff with Llama
+
+ return attn_output, attn_weights
+
+
+class Starcoder2DecoderLayer(MistralDecoderLayer):
+ def __init__(self, config: Starcoder2Config, layer_idx: int):
+ super().__init__(config, layer_idx)
+ self.self_attn = Starcoder2Attention(config=config, layer_idx=layer_idx)
+ self.mlp = Starcoder2MLP(config)
+ self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
+ self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
+
+
+STARCODER2_INPUTS_DOCSTRING = None # will be automatically redefined
+
+
+class Starcoder2Model(MistralModel):
+ def __init__(self, config: Starcoder2Config):
+ super().__init__(config)
+ self.layers = nn.ModuleList(
+ [Starcoder2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+ )
+ self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
+ self.embedding_dropout = config.embedding_dropout
+
+ @add_start_docstrings_to_model_forward(STARCODER2_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if (input_ids is None) ^ (inputs_embeds is not None):
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
+
+ if self.gradient_checkpointing and self.training and use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
+ )
+ use_cache = False
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ if use_cache and past_key_values is None:
+ past_key_values = DynamicCache()
+
+ if cache_position is None:
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+ cache_position = torch.arange(
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+ )
+
+ if position_ids is None:
+ position_ids = cache_position.unsqueeze(0)
+
+ causal_mask = self._update_causal_mask(
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
+ )
+
+ hidden_states = inputs_embeds
+ hidden_states = nn.functional.dropout(
+ hidden_states, p=self.embedding_dropout, training=self.training
+ ) # main diff with Llama
+
+ # create position embeddings to be shared across the decoder layers
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=causal_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_values,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ position_embeddings=position_embeddings,
+ **flash_attn_kwargs,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ hidden_states = self.norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ output = BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=past_key_values if use_cache else None,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ )
+ return output if return_dict else output.to_tuple()
+
+
+class Starcoder2ForCausalLM(MistralForCausalLM):
+ pass
+
+
+class Starcoder2ForSequenceClassification(MistralForSequenceClassification):
+ pass
+
+
+class Starcoder2ForTokenClassification(MistralForTokenClassification):
+ pass
+
+
+__all__ = [
+ "Starcoder2ForCausalLM",
+ "Starcoder2Model",
+ "Starcoder2PreTrainedModel", # noqa: F822
+ "Starcoder2ForSequenceClassification",
+ "Starcoder2ForTokenClassification",
+]
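+
+# NOTE: the classes in this modular file subclass the Mistral implementations and override only
+# what differs in Starcoder2 (LayerNorm instead of RMSNorm, bias-enabled projections, embedding
+# and residual dropout); the full modeling_starcoder2.py is generated from these definitions.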
diff --git a/janus/lib/python3.10/site-packages/transformers/models/vision_text_dual_encoder/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/vision_text_dual_encoder/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1639e352739838765afb7ac023a247dbb88b1f1f
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/vision_text_dual_encoder/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/vision_text_dual_encoder/__pycache__/configuration_vision_text_dual_encoder.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/vision_text_dual_encoder/__pycache__/configuration_vision_text_dual_encoder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3bb1c6395533c7b837daed50f1c5b4508ea1292a
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/vision_text_dual_encoder/__pycache__/configuration_vision_text_dual_encoder.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/vision_text_dual_encoder/__pycache__/modeling_tf_vision_text_dual_encoder.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/vision_text_dual_encoder/__pycache__/modeling_tf_vision_text_dual_encoder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..158f65514cca6f56546f6174e847177f721ab1a1
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/vision_text_dual_encoder/__pycache__/modeling_tf_vision_text_dual_encoder.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/vit/__init__.py b/janus/lib/python3.10/site-packages/transformers/models/vit/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d6a7a23fa63f4c95102584b90d7f775b746ce49
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/vit/__init__.py
@@ -0,0 +1,32 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+ from .configuration_vit import *
+ from .feature_extraction_vit import *
+ from .image_processing_vit import *
+ from .image_processing_vit_fast import *
+ from .modeling_flax_vit import *
+ from .modeling_tf_vit import *
+ from .modeling_vit import *
+else:
+ import sys
+
+ _file = globals()["__file__"]
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
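+
+# NOTE: a commented-out sketch of the lazy-import behaviour set up above: submodules are only
+# imported the first time one of their attributes is accessed.
+#
+# >>> from transformers.models.vit import ViTImageProcessor  # triggers the actual submodule import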
diff --git a/janus/lib/python3.10/site-packages/transformers/models/vit/image_processing_vit.py b/janus/lib/python3.10/site-packages/transformers/models/vit/image_processing_vit.py
new file mode 100644
index 0000000000000000000000000000000000000000..afee64dc0e7872630363c167b715332b9c39d55f
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/vit/image_processing_vit.py
@@ -0,0 +1,286 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for ViT."""
+
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import convert_to_rgb, resize, to_channel_dimension_format
+from ...image_utils import (
+ IMAGENET_STANDARD_MEAN,
+ IMAGENET_STANDARD_STD,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, filter_out_non_signature_kwargs, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class ViTImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a ViT image processor.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions to the specified `(size["height"],
+ size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method.
+ size (`dict`, *optional*, defaults to `{"height": 224, "width": 224}`):
+ Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
+ method.
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
+ Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
+ `preprocess` method.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
+ parameter in the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
+ `preprocess` method.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+ method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ do_convert_rgb (`bool`, *optional*):
+ Whether to convert the image to RGB.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Optional[Dict[str, int]] = None,
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_normalize: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_convert_rgb: Optional[bool] = None,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ size = size if size is not None else {"height": 224, "width": 224}
+ size = get_size_dict(size)
+ self.do_resize = do_resize
+ self.do_rescale = do_rescale
+ self.do_normalize = do_normalize
+ self.size = size
+ self.resample = resample
+ self.rescale_factor = rescale_factor
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
+ self.do_convert_rgb = do_convert_rgb
+
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize an image to `(size["height"], size["width"])`.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
+ data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
+ image is used. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+
+ Returns:
+ `np.ndarray`: The resized image.
+ """
+ size = get_size_dict(size)
+ if "height" not in size or "width" not in size:
+ raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
+ output_size = (size["height"], size["width"])
+ return resize(
+ image,
+ size=output_size,
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+
+ @filter_out_non_signature_kwargs()
+ def preprocess(
+ self,
+ images: ImageInput,
+ do_resize: Optional[bool] = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_rescale: Optional[bool] = None,
+ rescale_factor: Optional[float] = None,
+ do_normalize: Optional[bool] = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ do_convert_rgb: Optional[bool] = None,
+ ):
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ images (`ImageInput`):
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after
+ resizing.
+ resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
+ `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
+ an effect if `do_resize` is set to `True`.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image values between [0 - 1].
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean to use if `do_normalize` is set to `True`.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation to use if `do_normalize` is set to `True`.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: Use the channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
+ Whether to convert the image to RGB.
+ """
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ resample = resample if resample is not None else self.resample
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
+
+ size = size if size is not None else self.size
+ size_dict = get_size_dict(size)
+
+ images = make_list_of_images(images)
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+
+ if do_convert_rgb:
+ images = [convert_to_rgb(image) for image in images]
+
+ # All transformations expect numpy arrays.
+ images = [to_numpy_array(image) for image in images]
+
+ if do_rescale and is_scaled_image(images[0]):
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ if do_resize:
+ images = [
+ self.resize(image=image, size=size_dict, resample=resample, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_rescale:
+ images = [
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_normalize:
+ images = [
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ images = [
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
+ ]
+
+ data = {"pixel_values": images}
+ return BatchFeature(data=data, tensor_type=return_tensors)
+
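+# A minimal usage sketch (illustrative only; the checkpoint name and the "cat.png" path are assumptions, not
+# part of this module):
+#
+#     from PIL import Image
+#     from transformers import ViTImageProcessor
+#
+#     processor = ViTImageProcessor()  # or ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
+#     batch = processor.preprocess(Image.open("cat.png"), return_tensors="pt")
+#     # batch["pixel_values"] has shape (1, 3, 224, 224) assuming the default 224x224 output size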
+
+__all__ = ["ViTImageProcessor"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/vit/image_processing_vit_fast.py b/janus/lib/python3.10/site-packages/transformers/models/vit/image_processing_vit_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..5abf6cf10aa48ee0c37c38b1a51139f622788709
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/vit/image_processing_vit_fast.py
@@ -0,0 +1,303 @@
+# coding=utf-8
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Fast Image processor class for ViT."""
+
+import functools
+from typing import Dict, List, Optional, Union
+
+from ...image_processing_base import BatchFeature
+from ...image_processing_utils import get_size_dict
+from ...image_processing_utils_fast import BaseImageProcessorFast, SizeDict
+from ...image_transforms import FusedRescaleNormalize, NumpyToTensor, Rescale, convert_to_rgb
+from ...image_utils import (
+ IMAGENET_STANDARD_MEAN,
+ IMAGENET_STANDARD_STD,
+ ChannelDimension,
+ ImageInput,
+ ImageType,
+ PILImageResampling,
+ get_image_type,
+ make_list_of_images,
+ pil_torch_interpolation_mapping,
+)
+from ...utils import TensorType, logging
+from ...utils.import_utils import is_torch_available, is_torchvision_available
+
+
+logger = logging.get_logger(__name__)
+
+
+if is_torch_available():
+ import torch
+
+
+if is_torchvision_available():
+ from torchvision.transforms import Compose, Normalize, PILToTensor, Resize
+
+
+class ViTImageProcessorFast(BaseImageProcessorFast):
+ r"""
+ Constructs a ViT image processor.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions to the specified `(size["height"],
+ size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method.
+ size (`dict`, *optional*, defaults to `{"height": 224, "width": 224}`):
+ Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
+ method.
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
+ Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
+ `preprocess` method.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
+ parameter in the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
+ `preprocess` method.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+ method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
+ Mean to use if normalizing the image. This is a float or a list of floats, one per image channel. Can
+ be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
+ Standard deviation to use if normalizing the image. This is a float or a list of floats, one per image
+ channel. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ do_convert_rgb (`bool`, *optional*):
+ Whether to convert the image to RGB.
+ """
+
+ model_input_names = ["pixel_values"]
+ _transform_params = [
+ "do_resize",
+ "do_rescale",
+ "do_normalize",
+ "size",
+ "resample",
+ "rescale_factor",
+ "image_mean",
+ "image_std",
+ "image_type",
+ ]
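+ # The parameters above are the ones the cached transform pipeline depends on: `get_transforms` (inherited
+ # from BaseImageProcessorFast) is expected to rebuild the torchvision `Compose` via `_build_transforms`
+ # below only when one of them changes.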
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Optional[Dict[str, int]] = None,
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_normalize: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_convert_rgb: Optional[bool] = None,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ size = size if size is not None else {"height": 224, "width": 224}
+ size = get_size_dict(size)
+ self.do_resize = do_resize
+ self.do_rescale = do_rescale
+ self.do_normalize = do_normalize
+ self.size = size
+ self.resample = resample
+ self.rescale_factor = rescale_factor
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
+ self.do_convert_rgb = do_convert_rgb
+
+ def _build_transforms(
+ self,
+ do_resize: bool,
+ size: Dict[str, int],
+ resample: PILImageResampling,
+ do_rescale: bool,
+ rescale_factor: float,
+ do_normalize: bool,
+ image_mean: Union[float, List[float]],
+ image_std: Union[float, List[float]],
+ image_type: ImageType,
+ ) -> "Compose":
+ """
+ Given the input settings build the image transforms using `torchvision.transforms.Compose`.
+ """
+ transforms = []
+
+ # All PIL and numpy values need to be converted to a torch tensor
+ # to keep cross compatibility with slow image processors
+ if image_type == ImageType.PIL:
+ transforms.append(PILToTensor())
+
+ elif image_type == ImageType.NUMPY:
+ transforms.append(NumpyToTensor())
+
+ if do_resize:
+ transforms.append(
+ Resize((size["height"], size["width"]), interpolation=pil_torch_interpolation_mapping[resample])
+ )
+
+ # We can combine rescale and normalize into a single operation for speed
+ if do_rescale and do_normalize:
+ transforms.append(FusedRescaleNormalize(image_mean, image_std, rescale_factor=rescale_factor))
+ elif do_rescale:
+ transforms.append(Rescale(rescale_factor=rescale_factor))
+ elif do_normalize:
+ transforms.append(Normalize(image_mean, image_std))
+
+ return Compose(transforms)
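+ # Note on the fused path: assuming `FusedRescaleNormalize` applies the rescale followed by the
+ # normalization, a pixel x is mapped to (x * rescale_factor - image_mean) / image_std in a single pass,
+ # which matches applying `Rescale` and then `Normalize` separately while avoiding one extra traversal of
+ # the tensor.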
+
+ @functools.lru_cache(maxsize=1)
+ def _validate_input_arguments(
+ self,
+ return_tensors: Union[str, TensorType],
+ do_resize: bool,
+ size: Dict[str, int],
+ resample: PILImageResampling,
+ do_rescale: bool,
+ rescale_factor: float,
+ do_normalize: bool,
+ image_mean: Union[float, List[float]],
+ image_std: Union[float, List[float]],
+ data_format: Union[str, ChannelDimension],
+ image_type: ImageType,
+ ):
+ if return_tensors != "pt":
+ raise ValueError("Only returning PyTorch tensors is currently supported.")
+
+ if data_format != ChannelDimension.FIRST:
+ raise ValueError("Only channel first data format is currently supported.")
+
+ if do_resize and None in (size, resample):
+ raise ValueError("Size and resample must be specified if do_resize is True.")
+
+ if do_rescale and rescale_factor is None:
+ raise ValueError("Rescale factor must be specified if do_rescale is True.")
+
+ if do_normalize and None in (image_mean, image_std):
+ raise ValueError("Image mean and standard deviation must be specified if do_normalize is True.")
+
+ def preprocess(
+ self,
+ images: ImageInput,
+ do_resize: Optional[bool] = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_rescale: Optional[bool] = None,
+ rescale_factor: Optional[float] = None,
+ do_normalize: Optional[bool] = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ return_tensors: Optional[Union[str, TensorType]] = "pt",
+ data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ do_convert_rgb: Optional[bool] = None,
+ **kwargs,
+ ):
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ images (`ImageInput`):
+ Image to preprocess. Expects a single image or a batch of images with pixel values ranging from 0 to 255. If
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after
+ resizing.
+ resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
+ `PILImageResampling` filter to use if resizing the image, e.g. `PILImageResampling.BILINEAR`. Only has
+ an effect if `do_resize` is set to `True`.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image pixel values to the range [0, 1].
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean to use if `do_normalize` is set to `True`.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation to use if `do_normalize` is set to `True`.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Only "pt" (PyTorch tensors) is currently supported.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. The following formats are currently supported:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ do_convert_rgb (`bool`, *optional*):
+ Whether to convert the image to RGB.
+ """
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ resample = resample if resample is not None else self.resample
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+ size = size if size is not None else self.size
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
+ return_tensors = "pt" if return_tensors is None else return_tensors
+ # Make hashable for cache
+ size = SizeDict(**size)
+ image_mean = tuple(image_mean) if isinstance(image_mean, list) else image_mean
+ image_std = tuple(image_std) if isinstance(image_std, list) else image_std
+
+ images = make_list_of_images(images)
+ image_type = get_image_type(images[0])
+
+ if image_type not in [ImageType.PIL, ImageType.TORCH, ImageType.NUMPY]:
+ raise ValueError(f"Unsupported input image type {image_type}")
+
+ self._validate_input_arguments(
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ return_tensors=return_tensors,
+ data_format=data_format,
+ image_type=image_type,
+ )
+
+ if do_convert_rgb:
+ images = [convert_to_rgb(image) for image in images]
+
+ transforms = self.get_transforms(
+ do_resize=do_resize,
+ do_rescale=do_rescale,
+ do_normalize=do_normalize,
+ size=size,
+ resample=resample,
+ rescale_factor=rescale_factor,
+ image_mean=image_mean,
+ image_std=image_std,
+ image_type=image_type,
+ )
+ transformed_images = [transforms(image) for image in images]
+
+ data = {"pixel_values": torch.stack(transformed_images, dim=0)}
+ return BatchFeature(data, tensor_type=return_tensors)
+
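+# A minimal usage sketch for the fast processor (illustrative only; it assumes torch and torchvision are
+# installed and that "cat.png" exists):
+#
+#     from PIL import Image
+#     from transformers import ViTImageProcessorFast
+#
+#     processor = ViTImageProcessorFast()
+#     batch = processor.preprocess(Image.open("cat.png"), return_tensors="pt")
+#     # batch["pixel_values"] is a torch.Tensor of shape (1, 3, 224, 224); only "pt" output is supported.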
+
+__all__ = ["ViTImageProcessorFast"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/vit/modeling_flax_vit.py b/janus/lib/python3.10/site-packages/transformers/models/vit/modeling_flax_vit.py
new file mode 100644
index 0000000000000000000000000000000000000000..9df89b9674a1f1600ed4295dd0ca3d0505f67378
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/vit/modeling_flax_vit.py
@@ -0,0 +1,676 @@
+# coding=utf-8
+# Copyright 2021 The Google Flax Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Optional, Tuple
+
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
+from flax.linen.attention import dot_product_attention_weights
+from flax.traverse_util import flatten_dict, unflatten_dict
+
+from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxBaseModelOutputWithPooling, FlaxSequenceClassifierOutput
+from ...modeling_flax_utils import (
+ ACT2FN,
+ FlaxPreTrainedModel,
+ append_replace_return_docstrings,
+ overwrite_call_docstring,
+)
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward
+from .configuration_vit import ViTConfig
+
+
+VIT_START_DOCSTRING = r"""
+
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading, saving and converting weights from PyTorch models).
+
+ This model is also a
+ [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
+ a regular Flax linen Module and refer to the Flax documentation for all matters related to general usage and
+ behavior.
+
+ Finally, this model supports inherent JAX features such as:
+
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
+
+ Parameters:
+ config ([`ViTConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
+ `jax.numpy.bfloat16` (on TPUs).
+
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
+ specified all the computation will be performed with the given `dtype`.
+
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
+ parameters.**
+
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
+ [`~FlaxPreTrainedModel.to_bf16`].
+"""
+
+VIT_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`]
+ for details.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class FlaxViTPatchEmbeddings(nn.Module):
+ config: ViTConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ image_size = self.config.image_size
+ patch_size = self.config.patch_size
+ num_patches = (image_size // patch_size) * (image_size // patch_size)
+ self.num_patches = num_patches
+ self.num_channels = self.config.num_channels
+ self.projection = nn.Conv(
+ self.config.hidden_size,
+ kernel_size=(patch_size, patch_size),
+ strides=(patch_size, patch_size),
+ padding="VALID",
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.variance_scaling(
+ self.config.initializer_range**2, "fan_in", "truncated_normal"
+ ),
+ )
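+ # With kernel_size == strides == patch_size and VALID padding, this convolution extracts non-overlapping
+ # patches: an NHWC input of shape (batch, H, W, num_channels) becomes (batch, H // patch_size,
+ # W // patch_size, hidden_size), which __call__ below flattens to (batch, num_patches, hidden_size).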
+
+ def __call__(self, pixel_values):
+ num_channels = pixel_values.shape[-1]
+ if num_channels != self.num_channels:
+ raise ValueError(
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
+ )
+ embeddings = self.projection(pixel_values)
+ batch_size, _, _, channels = embeddings.shape
+ return jnp.reshape(embeddings, (batch_size, -1, channels))
+
+
+class FlaxViTEmbeddings(nn.Module):
+ """Construct the CLS token, position and patch embeddings."""
+
+ config: ViTConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.cls_token = self.param(
+ "cls_token",
+ jax.nn.initializers.variance_scaling(self.config.initializer_range**2, "fan_in", "truncated_normal"),
+ (1, 1, self.config.hidden_size),
+ )
+ self.patch_embeddings = FlaxViTPatchEmbeddings(self.config, dtype=self.dtype)
+ num_patches = self.patch_embeddings.num_patches
+ self.position_embeddings = self.param(
+ "position_embeddings",
+ jax.nn.initializers.variance_scaling(self.config.initializer_range**2, "fan_in", "truncated_normal"),
+ (1, num_patches + 1, self.config.hidden_size),
+ )
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
+
+ def __call__(self, pixel_values, deterministic=True):
+ batch_size = pixel_values.shape[0]
+
+ embeddings = self.patch_embeddings(pixel_values)
+
+ cls_tokens = jnp.broadcast_to(self.cls_token, (batch_size, 1, self.config.hidden_size))
+ embeddings = jnp.concatenate((cls_tokens, embeddings), axis=1)
+ embeddings = embeddings + self.position_embeddings
+ embeddings = self.dropout(embeddings, deterministic=deterministic)
+ return embeddings
+
+
+class FlaxViTSelfAttention(nn.Module):
+ config: ViTConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ if self.config.hidden_size % self.config.num_attention_heads != 0:
+ raise ValueError(
+ "`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads`:"
+ " {self.config.num_attention_heads}"
+ )
+
+ self.query = nn.Dense(
+ self.config.hidden_size,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.variance_scaling(
+ self.config.initializer_range**2, mode="fan_in", distribution="truncated_normal"
+ ),
+ use_bias=self.config.qkv_bias,
+ )
+ self.key = nn.Dense(
+ self.config.hidden_size,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.variance_scaling(
+ self.config.initializer_range**2, mode="fan_in", distribution="truncated_normal"
+ ),
+ use_bias=self.config.qkv_bias,
+ )
+ self.value = nn.Dense(
+ self.config.hidden_size,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.variance_scaling(
+ self.config.initializer_range**2, mode="fan_in", distribution="truncated_normal"
+ ),
+ use_bias=self.config.qkv_bias,
+ )
+
+ def __call__(self, hidden_states, deterministic: bool = True, output_attentions: bool = False):
+ head_dim = self.config.hidden_size // self.config.num_attention_heads
+
+ query_states = self.query(hidden_states).reshape(
+ hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
+ )
+ value_states = self.value(hidden_states).reshape(
+ hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
+ )
+ key_states = self.key(hidden_states).reshape(
+ hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
+ )
+
+ dropout_rng = None
+ if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
+ dropout_rng = self.make_rng("dropout")
+
+ attn_weights = dot_product_attention_weights(
+ query_states,
+ key_states,
+ dropout_rng=dropout_rng,
+ dropout_rate=self.config.attention_probs_dropout_prob,
+ broadcast_dropout=True,
+ deterministic=deterministic,
+ dtype=self.dtype,
+ precision=None,
+ )
+
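+ # attn_weights: (..., num_heads, q_len, k_len); value_states: (..., k_len, num_heads, head_dim)
+ # -> attn_output: (..., q_len, num_heads, head_dim), flattened back to hidden_size just below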
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
+ attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))
+
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
+ return outputs
+
+
+class FlaxViTSelfOutput(nn.Module):
+ config: ViTConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dense = nn.Dense(
+ self.config.hidden_size,
+ kernel_init=jax.nn.initializers.variance_scaling(
+ self.config.initializer_range**2, "fan_in", "truncated_normal"
+ ),
+ dtype=self.dtype,
+ )
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
+
+ def __call__(self, hidden_states, input_tensor, deterministic: bool = True):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ return hidden_states
+
+
+class FlaxViTAttention(nn.Module):
+ config: ViTConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.attention = FlaxViTSelfAttention(self.config, dtype=self.dtype)
+ self.output = FlaxViTSelfOutput(self.config, dtype=self.dtype)
+
+ def __call__(self, hidden_states, deterministic=True, output_attentions: bool = False):
+ attn_outputs = self.attention(hidden_states, deterministic=deterministic, output_attentions=output_attentions)
+ attn_output = attn_outputs[0]
+ hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_outputs[1],)
+
+ return outputs
+
+
+class FlaxViTIntermediate(nn.Module):
+ config: ViTConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dense = nn.Dense(
+ self.config.intermediate_size,
+ kernel_init=jax.nn.initializers.variance_scaling(
+ self.config.initializer_range**2, "fan_in", "truncated_normal"
+ ),
+ dtype=self.dtype,
+ )
+ self.activation = ACT2FN[self.config.hidden_act]
+
+ def __call__(self, hidden_states):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+class FlaxViTOutput(nn.Module):
+ config: ViTConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dense = nn.Dense(
+ self.config.hidden_size,
+ kernel_init=jax.nn.initializers.variance_scaling(
+ self.config.initializer_range**2, "fan_in", "truncated_normal"
+ ),
+ dtype=self.dtype,
+ )
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
+
+ def __call__(self, hidden_states, attention_output, deterministic: bool = True):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ hidden_states = hidden_states + attention_output
+ return hidden_states
+
+
+class FlaxViTLayer(nn.Module):
+ config: ViTConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.attention = FlaxViTAttention(self.config, dtype=self.dtype)
+ self.intermediate = FlaxViTIntermediate(self.config, dtype=self.dtype)
+ self.output = FlaxViTOutput(self.config, dtype=self.dtype)
+ self.layernorm_before = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
+ self.layernorm_after = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
+
+ def __call__(self, hidden_states, deterministic: bool = True, output_attentions: bool = False):
+ attention_outputs = self.attention(
+ self.layernorm_before(hidden_states), # in ViT, layernorm is applied before self-attention
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ )
+
+ attention_output = attention_outputs[0]
+
+ # first residual connection
+ attention_output = attention_output + hidden_states
+
+ # in ViT, layernorm is also applied after self-attention
+ layer_output = self.layernorm_after(attention_output)
+
+ hidden_states = self.intermediate(layer_output)
+ hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attention_outputs[1],)
+ return outputs
+
+
+class FlaxViTLayerCollection(nn.Module):
+ config: ViTConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.layers = [
+ FlaxViTLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)
+ ]
+
+ def __call__(
+ self,
+ hidden_states,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ all_attentions = () if output_attentions else None
+ all_hidden_states = () if output_hidden_states else None
+
+ for i, layer in enumerate(self.layers):
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ layer_outputs = layer(hidden_states, deterministic=deterministic, output_attentions=output_attentions)
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions += (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ outputs = (hidden_states,)
+ if not return_dict:
+ return tuple(v for v in outputs if v is not None)
+
+ return FlaxBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+
+class FlaxViTEncoder(nn.Module):
+ config: ViTConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.layer = FlaxViTLayerCollection(self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ hidden_states,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ return self.layer(
+ hidden_states,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+
+class FlaxViTPooler(nn.Module):
+ config: ViTConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dense = nn.Dense(
+ self.config.hidden_size,
+ kernel_init=jax.nn.initializers.variance_scaling(
+ self.config.initializer_range**2, "fan_in", "truncated_normal"
+ ),
+ dtype=self.dtype,
+ )
+
+ def __call__(self, hidden_states):
+ cls_hidden_state = hidden_states[:, 0]
+ cls_hidden_state = self.dense(cls_hidden_state)
+ return nn.tanh(cls_hidden_state)
+
+
+class FlaxViTPreTrainedModel(FlaxPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = ViTConfig
+ base_model_prefix = "vit"
+ main_input_name = "pixel_values"
+ module_class: nn.Module = None
+
+ def __init__(
+ self,
+ config: ViTConfig,
+ input_shape=None,
+ seed: int = 0,
+ dtype: jnp.dtype = jnp.float32,
+ _do_init: bool = True,
+ **kwargs,
+ ):
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
+ if input_shape is None:
+ input_shape = (1, config.image_size, config.image_size, config.num_channels)
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
+
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
+ # init input tensors
+ pixel_values = jnp.zeros(input_shape, dtype=self.dtype)
+
+ params_rng, dropout_rng = jax.random.split(rng)
+ rngs = {"params": params_rng, "dropout": dropout_rng}
+
+ random_params = self.module.init(rngs, pixel_values, return_dict=False)["params"]
+
+ if params is not None:
+ random_params = flatten_dict(unfreeze(random_params))
+ params = flatten_dict(unfreeze(params))
+ for missing_key in self._missing_keys:
+ params[missing_key] = random_params[missing_key]
+ self._missing_keys = set()
+ return freeze(unflatten_dict(params))
+ else:
+ return random_params
+
+ @add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ def __call__(
+ self,
+ pixel_values,
+ params: dict = None,
+ dropout_rng: jax.random.PRNGKey = None,
+ train: bool = False,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ):
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
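+ # Flax convolutions expect channels-last inputs, so move from the (batch, num_channels, height, width)
+ # layout produced by the image processors to (batch, height, width, num_channels).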
+ pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1))
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ return self.module.apply(
+ {"params": params or self.params},
+ jnp.array(pixel_values, dtype=jnp.float32),
+ not train,
+ output_attentions,
+ output_hidden_states,
+ return_dict,
+ rngs=rngs,
+ )
+
+
+class FlaxViTModule(nn.Module):
+ config: ViTConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ add_pooling_layer: bool = True
+
+ def setup(self):
+ self.embeddings = FlaxViTEmbeddings(self.config, dtype=self.dtype)
+ self.encoder = FlaxViTEncoder(self.config, dtype=self.dtype)
+ self.layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
+ self.pooler = FlaxViTPooler(self.config, dtype=self.dtype) if self.add_pooling_layer else None
+
+ def __call__(
+ self,
+ pixel_values,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ hidden_states = self.embeddings(pixel_values, deterministic=deterministic)
+
+ outputs = self.encoder(
+ hidden_states,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = outputs[0]
+ hidden_states = self.layernorm(hidden_states)
+ pooled = self.pooler(hidden_states) if self.add_pooling_layer else None
+
+ if not return_dict:
+ # if pooled is None, don't return it
+ if pooled is None:
+ return (hidden_states,) + outputs[1:]
+ return (hidden_states, pooled) + outputs[1:]
+
+ return FlaxBaseModelOutputWithPooling(
+ last_hidden_state=hidden_states,
+ pooler_output=pooled,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ "The bare ViT Model transformer outputting raw hidden-states without any specific head on top.",
+ VIT_START_DOCSTRING,
+)
+class FlaxViTModel(FlaxViTPreTrainedModel):
+ module_class = FlaxViTModule
+
+
+FLAX_VISION_MODEL_DOCSTRING = """
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, FlaxViTModel
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
+ >>> model = FlaxViTModel.from_pretrained("google/vit-base-patch16-224-in21k")
+
+ >>> inputs = image_processor(images=image, return_tensors="np")
+ >>> outputs = model(**inputs)
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```
+"""
+
+overwrite_call_docstring(FlaxViTModel, FLAX_VISION_MODEL_DOCSTRING)
+append_replace_return_docstrings(FlaxViTModel, output_type=FlaxBaseModelOutputWithPooling, config_class=ViTConfig)
+
+
+class FlaxViTForImageClassificationModule(nn.Module):
+ config: ViTConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.vit = FlaxViTModule(config=self.config, dtype=self.dtype, add_pooling_layer=False)
+ self.classifier = nn.Dense(
+ self.config.num_labels,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.variance_scaling(
+ self.config.initializer_range**2, "fan_in", "truncated_normal"
+ ),
+ )
+
+ def __call__(
+ self,
+ pixel_values=None,
+ deterministic: bool = True,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.vit(
+ pixel_values,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ logits = self.classifier(hidden_states[:, 0, :])
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return output
+
+ return FlaxSequenceClassifierOutput(
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ ViT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
+ the [CLS] token), e.g. for ImageNet.
+ """,
+ VIT_START_DOCSTRING,
+)
+class FlaxViTForImageClassification(FlaxViTPreTrainedModel):
+ module_class = FlaxViTForImageClassificationModule
+
+
+FLAX_VISION_CLASSIF_DOCSTRING = """
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, FlaxViTForImageClassification
+ >>> from PIL import Image
+ >>> import jax
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
+ >>> model = FlaxViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
+
+ >>> inputs = image_processor(images=image, return_tensors="np")
+ >>> outputs = model(**inputs)
+ >>> logits = outputs.logits
+
+ >>> # model predicts one of the 1000 ImageNet classes
+ >>> predicted_class_idx = jax.numpy.argmax(logits, axis=-1)
+ >>> print("Predicted class:", model.config.id2label[predicted_class_idx.item()])
+ ```
+"""
+
+overwrite_call_docstring(FlaxViTForImageClassification, FLAX_VISION_CLASSIF_DOCSTRING)
+append_replace_return_docstrings(
+ FlaxViTForImageClassification, output_type=FlaxSequenceClassifierOutput, config_class=ViTConfig
+)
+
+
+__all__ = ["FlaxViTForImageClassification", "FlaxViTModel", "FlaxViTPreTrainedModel"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/vit/modeling_tf_vit.py b/janus/lib/python3.10/site-packages/transformers/models/vit/modeling_tf_vit.py
new file mode 100644
index 0000000000000000000000000000000000000000..780a1dc5c7bddd9a24d671993ffad51febed370d
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/vit/modeling_tf_vit.py
@@ -0,0 +1,907 @@
+# coding=utf-8
+# Copyright 2021 Google AI, Ross Wightman, The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""TF 2.0 ViT model."""
+
+from __future__ import annotations
+
+import collections.abc
+import math
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling, TFSequenceClassifierOutput
+from ...modeling_tf_utils import (
+ TFModelInputType,
+ TFPreTrainedModel,
+ TFSequenceClassificationLoss,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import shape_list, stable_softmax
+from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_vit import ViTConfig
+
+
+logger = logging.get_logger(__name__)
+
+# General docstring
+_CONFIG_FOR_DOC = "ViTConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "google/vit-base-patch16-224-in21k"
+_EXPECTED_OUTPUT_SHAPE = [1, 197, 768]
+
+# Image classification docstring
+_IMAGE_CLASS_CHECKPOINT = "google/vit-base-patch16-224"
+_IMAGE_CLASS_EXPECTED_OUTPUT = "Egyptian cat"
+
+
+class TFViTEmbeddings(keras.layers.Layer):
+ """
+ Construct the CLS token, position and patch embeddings.
+
+ """
+
+ def __init__(self, config: ViTConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.patch_embeddings = TFViTPatchEmbeddings(config, name="patch_embeddings")
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+ self.config = config
+
+ def build(self, input_shape=None):
+ num_patches = self.patch_embeddings.num_patches
+ self.cls_token = self.add_weight(
+ shape=(1, 1, self.config.hidden_size),
+ initializer=get_initializer(self.config.initializer_range),
+ trainable=True,
+ name="cls_token",
+ )
+ self.position_embeddings = self.add_weight(
+ shape=(1, num_patches + 1, self.config.hidden_size),
+ initializer=get_initializer(self.config.initializer_range),
+ trainable=True,
+ name="position_embeddings",
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "patch_embeddings", None) is not None:
+ with tf.name_scope(self.patch_embeddings.name):
+ self.patch_embeddings.build(None)
+
+ def interpolate_pos_encoding(self, embeddings, height, width) -> tf.Tensor:
+ """
+ This method interpolates the pre-trained position encodings so that the model can be used on
+ higher-resolution images.
+
+ Source:
+ https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
+ """
+
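+ # Worked example: with a 224x224 pretraining resolution and patch_size=16, position_embeddings covers
+ # 1 + 14 * 14 = 197 positions. For a 384x384 input, h0 = w0 = 384 // 16 = 24, so the 14x14 grid of patch
+ # position embeddings is bicubic-resized to 24x24 = 576 positions and the class position is re-attached.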
+ batch_size, seq_len, dim = shape_list(embeddings)
+ num_patches = seq_len - 1
+
+ _, num_positions, _ = shape_list(self.position_embeddings)
+ num_positions -= 1
+
+ if num_patches == num_positions and height == width:
+ return self.position_embeddings
+ class_pos_embed = self.position_embeddings[:, :1]
+ patch_pos_embed = self.position_embeddings[:, 1:]
+ h0 = height // self.config.patch_size
+ w0 = width // self.config.patch_size
+ patch_pos_embed = tf.image.resize(
+ images=tf.reshape(
+ patch_pos_embed, shape=(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim)
+ ),
+ size=(h0, w0),
+ method="bicubic",
+ )
+
+ shape = shape_list(patch_pos_embed)
+ assert h0 == shape[-3] and w0 == shape[-2]
+ patch_pos_embed = tf.reshape(tensor=patch_pos_embed, shape=(1, -1, dim))
+ return tf.concat(values=(class_pos_embed, patch_pos_embed), axis=1)
+
+ def call(
+ self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool = False, training: bool = False
+ ) -> tf.Tensor:
+ batch_size, num_channels, height, width = shape_list(pixel_values)
+ embeddings = self.patch_embeddings(
+ pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, training=training
+ )
+
+ # add the [CLS] token to the embedded patch tokens
+ cls_tokens = tf.repeat(self.cls_token, repeats=batch_size, axis=0)
+ embeddings = tf.concat((cls_tokens, embeddings), axis=1)
+
+ # add positional encoding to each token
+ if interpolate_pos_encoding:
+ embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
+ else:
+ embeddings = embeddings + self.position_embeddings
+
+ embeddings = self.dropout(embeddings, training=training)
+
+ return embeddings
+
+
+# Based on timm implementation, which can be found here:
+# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
+class TFViTPatchEmbeddings(keras.layers.Layer):
+ """
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
+ Transformer.
+ """
+
+ def __init__(self, config: ViTConfig, **kwargs):
+ super().__init__(**kwargs)
+ image_size, patch_size = config.image_size, config.patch_size
+ num_channels, hidden_size = config.num_channels, config.hidden_size
+
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_patches = num_patches
+ self.num_channels = num_channels
+ self.config = config
+
+ self.projection = keras.layers.Conv2D(
+ filters=hidden_size,
+ kernel_size=patch_size,
+ strides=patch_size,
+ padding="valid",
+ data_format="channels_last",
+ use_bias=True,
+ kernel_initializer=get_initializer(self.config.initializer_range),
+ bias_initializer="zeros",
+ name="projection",
+ )
+
+ def call(
+ self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool = False, training: bool = False
+ ) -> tf.Tensor:
+ batch_size, num_channels, height, width = shape_list(pixel_values)
+ if tf.executing_eagerly() and num_channels != self.num_channels:
+ raise ValueError(
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
+ )
+ if not interpolate_pos_encoding:
+ if tf.executing_eagerly():
+ if height != self.image_size[0] or width != self.image_size[1]:
+ raise ValueError(
+ f"Input image size ({height}*{width}) doesn't match model"
+ f" ({self.image_size[0]}*{self.image_size[1]})."
+ )
+
+ # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format.
+ # So change the input format from `NCHW` to `NHWC`.
+ # shape = (batch_size, in_height, in_width, in_channels=num_channels)
+ pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
+
+ projection = self.projection(pixel_values)
+
+ # Change the 2D spatial dimensions to a single temporal dimension.
+ # shape = (batch_size, num_patches, out_channels=embed_dim)
+ num_patches = (width // self.patch_size[1]) * (height // self.patch_size[0])
+ embeddings = tf.reshape(tensor=projection, shape=(batch_size, num_patches, -1))
+
+ return embeddings
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "projection", None) is not None:
+ with tf.name_scope(self.projection.name):
+ self.projection.build([None, None, None, self.num_channels])
+
+
+class TFViTSelfAttention(keras.layers.Layer):
+ def __init__(self, config: ViTConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ if config.hidden_size % config.num_attention_heads != 0:
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number "
+ f"of attention heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+ self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
+
+ self.query = keras.layers.Dense(
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
+ )
+ self.key = keras.layers.Dense(
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
+ )
+ self.value = keras.layers.Dense(
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
+ )
+ self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
+ self.config = config
+
+ def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
+ tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
+
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ head_mask: tf.Tensor,
+ output_attentions: bool,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ batch_size = shape_list(hidden_states)[0]
+ mixed_query_layer = self.query(inputs=hidden_states)
+ mixed_key_layer = self.key(inputs=hidden_states)
+ mixed_value_layer = self.value(inputs=hidden_states)
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
+ key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
+ value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ # (batch size, num_heads, seq_len_q, seq_len_k)
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
+ dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
+ attention_scores = tf.divide(attention_scores, dk)
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = stable_softmax(logits=attention_scores, axis=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(inputs=attention_probs, training=training)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = tf.multiply(attention_probs, head_mask)
+
+ attention_output = tf.matmul(attention_probs, value_layer)
+ attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
+
+ # (batch_size, seq_len_q, all_head_size)
+ attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "query", None) is not None:
+ with tf.name_scope(self.query.name):
+ self.query.build([None, None, self.config.hidden_size])
+ if getattr(self, "key", None) is not None:
+ with tf.name_scope(self.key.name):
+ self.key.build([None, None, self.config.hidden_size])
+ if getattr(self, "value", None) is not None:
+ with tf.name_scope(self.value.name):
+ self.value.build([None, None, self.config.hidden_size])
+
+
+class TFViTSelfOutput(keras.layers.Layer):
+ """
+ The residual connection is defined in TFViTLayer instead of here (as is the case with other models), due to the
+ layernorm applied before each block.
+ """
+
+ def __init__(self, config: ViTConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+class TFViTAttention(keras.layers.Layer):
+ def __init__(self, config: ViTConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.self_attention = TFViTSelfAttention(config, name="attention")
+ self.dense_output = TFViTSelfOutput(config, name="output")
+
+ def prune_heads(self, heads):
+ raise NotImplementedError
+
+ def call(
+ self,
+ input_tensor: tf.Tensor,
+ head_mask: tf.Tensor,
+ output_attentions: bool,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ self_outputs = self.self_attention(
+ hidden_states=input_tensor, head_mask=head_mask, output_attentions=output_attentions, training=training
+ )
+ attention_output = self.dense_output(
+ hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
+ )
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self_attention", None) is not None:
+ with tf.name_scope(self.self_attention.name):
+ self.self_attention.build(None)
+ if getattr(self, "dense_output", None) is not None:
+ with tf.name_scope(self.dense_output.name):
+ self.dense_output.build(None)
+
+
+class TFViTIntermediate(keras.layers.Layer):
+ def __init__(self, config: ViTConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
+ else:
+ self.intermediate_act_fn = config.hidden_act
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+class TFViTOutput(keras.layers.Layer):
+ def __init__(self, config: ViTConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
+ hidden_states = hidden_states + input_tensor
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.intermediate_size])
+
+
+class TFViTLayer(keras.layers.Layer):
+ """This corresponds to the Block class in the timm implementation."""
+
+ def __init__(self, config: ViTConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.attention = TFViTAttention(config, name="attention")
+ self.intermediate = TFViTIntermediate(config, name="intermediate")
+ self.vit_output = TFViTOutput(config, name="output")
+
+ self.layernorm_before = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_before")
+ self.layernorm_after = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_after")
+ self.config = config
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ head_mask: tf.Tensor,
+ output_attentions: bool,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ attention_outputs = self.attention(
+ # in ViT, layernorm is applied before self-attention
+ input_tensor=self.layernorm_before(inputs=hidden_states),
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ attention_output = attention_outputs[0]
+
+ # first residual connection
+ hidden_states = attention_output + hidden_states
+
+ # in ViT, layernorm is also applied after self-attention
+ layer_output = self.layernorm_after(inputs=hidden_states)
+
+ intermediate_output = self.intermediate(hidden_states=layer_output)
+
+ # second residual connection is done here
+ layer_output = self.vit_output(
+ hidden_states=intermediate_output, input_tensor=hidden_states, training=training
+ )
+ outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attention", None) is not None:
+ with tf.name_scope(self.attention.name):
+ self.attention.build(None)
+ if getattr(self, "intermediate", None) is not None:
+ with tf.name_scope(self.intermediate.name):
+ self.intermediate.build(None)
+ if getattr(self, "vit_output", None) is not None:
+ with tf.name_scope(self.vit_output.name):
+ self.vit_output.build(None)
+ if getattr(self, "layernorm_before", None) is not None:
+ with tf.name_scope(self.layernorm_before.name):
+ self.layernorm_before.build([None, None, self.config.hidden_size])
+ if getattr(self, "layernorm_after", None) is not None:
+ with tf.name_scope(self.layernorm_after.name):
+ self.layernorm_after.build([None, None, self.config.hidden_size])
+
+
+class TFViTEncoder(keras.layers.Layer):
+ def __init__(self, config: ViTConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.layer = [TFViTLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ head_mask: tf.Tensor,
+ output_attentions: bool,
+ output_hidden_states: bool,
+ return_dict: bool,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_outputs = layer_module(
+ hidden_states=hidden_states,
+ head_mask=head_mask[i],
+ output_attentions=output_attentions,
+ training=training,
+ )
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
+
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "layer", None) is not None:
+ for layer in self.layer:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+@keras_serializable
+class TFViTMainLayer(keras.layers.Layer):
+ config_class = ViTConfig
+
+ def __init__(self, config: ViTConfig, add_pooling_layer: bool = True, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+
+ self.embeddings = TFViTEmbeddings(config, name="embeddings")
+ self.encoder = TFViTEncoder(config, name="encoder")
+ self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
+ self.pooler = TFViTPooler(config, name="pooler") if add_pooling_layer else None
+
+ def get_input_embeddings(self) -> keras.layers.Layer:
+ return self.embeddings.patch_embeddings
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ raise NotImplementedError
+
+ @unpack_inputs
+ def call(
+ self,
+ pixel_values: TFModelInputType | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ interpolate_pos_encoding: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ embedding_output = self.embeddings(
+ pixel_values=pixel_values,
+ interpolate_pos_encoding=interpolate_pos_encoding,
+ training=training,
+ )
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ if head_mask is not None:
+ raise NotImplementedError
+ else:
+ head_mask = [None] * self.config.num_hidden_layers
+
+ encoder_outputs = self.encoder(
+ hidden_states=embedding_output,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ sequence_output = encoder_outputs[0]
+ sequence_output = self.layernorm(inputs=sequence_output)
+ pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return TFBaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "layernorm", None) is not None:
+ with tf.name_scope(self.layernorm.name):
+ self.layernorm.build([None, None, self.config.hidden_size])
+ if getattr(self, "pooler", None) is not None:
+ with tf.name_scope(self.pooler.name):
+ self.pooler.build(None)
+
+
+class TFViTPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = ViTConfig
+ base_model_prefix = "vit"
+ main_input_name = "pixel_values"
+
+
+VIT_START_DOCSTRING = r"""
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
+ behavior.
+
+ <Tip>
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+ </Tip>
+
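+ As a minimal sketch of the first two call styles (the checkpoint name below is only an example):
+
+ ```python
+ >>> import tensorflow as tf
+ >>> from transformers import TFViTModel
+
+ >>> model = TFViTModel.from_pretrained("google/vit-base-patch16-224-in21k")
+ >>> pixel_values = tf.random.uniform((1, 3, 224, 224))  # one dummy image, channels-first
+ >>> outputs = model(pixel_values=pixel_values)  # keyword-argument style
+ >>> outputs = model({"pixel_values": pixel_values})  # dict in the first positional argument
+ ```
+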
+ Args:
+ config ([`ViTConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+VIT_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`]
+ for details.
+
+ head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ interpolate_pos_encoding (`bool`, *optional*):
+ Whether to interpolate the pre-trained position encodings.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+@add_start_docstrings(
+ "The bare ViT Model transformer outputting raw hidden-states without any specific head on top.",
+ VIT_START_DOCSTRING,
+)
+class TFViTModel(TFViTPreTrainedModel):
+ def __init__(self, config: ViTConfig, *inputs, add_pooling_layer=True, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.vit = TFViTMainLayer(config, add_pooling_layer=add_pooling_layer, name="vit")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFBaseModelOutputWithPooling,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def call(
+ self,
+ pixel_values: TFModelInputType | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ interpolate_pos_encoding: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
+ outputs = self.vit(
+ pixel_values=pixel_values,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ interpolate_pos_encoding=interpolate_pos_encoding,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "vit", None) is not None:
+ with tf.name_scope(self.vit.name):
+ self.vit.build(None)
+
+
+class TFViTPooler(keras.layers.Layer):
+ def __init__(self, config: ViTConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ activation="tanh",
+ name="dense",
+ )
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(inputs=first_token_tensor)
+
+ return pooled_output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings(
+ """
+ ViT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
+ the [CLS] token) e.g. for ImageNet.
+
+ <Tip>
+
+ Note that it's possible to fine-tune ViT on higher resolution images than the ones it has been trained on, by
+ setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained
+ position embeddings to the higher resolution.
+
+ </Tip>
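+
+ A minimal sketch of higher-resolution usage (the checkpoint name, input size and label below are only examples):
+
+ ```python
+ >>> import tensorflow as tf
+ >>> from transformers import TFViTForImageClassification
+
+ >>> model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
+ >>> pixel_values = tf.random.uniform((1, 3, 384, 384))  # larger than the 224x224 pretraining resolution
+ >>> labels = tf.constant([1])
+ >>> outputs = model(pixel_values, labels=labels, interpolate_pos_encoding=True)
+ >>> loss, logits = outputs.loss, outputs.logits
+ ```
+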
+ """,
+ VIT_START_DOCSTRING,
+)
+class TFViTForImageClassification(TFViTPreTrainedModel, TFSequenceClassificationLoss):
+ def __init__(self, config: ViTConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.num_labels = config.num_labels
+ self.vit = TFViTMainLayer(config, add_pooling_layer=False, name="vit")
+
+ # Classifier head
+ self.classifier = keras.layers.Dense(
+ units=config.num_labels,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="classifier",
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
+ output_type=TFSequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+ )
+ def call(
+ self,
+ pixel_values: TFModelInputType | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ interpolate_pos_encoding: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+
+ outputs = self.vit(
+ pixel_values=pixel_values,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ interpolate_pos_encoding=interpolate_pos_encoding,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ logits = self.classifier(inputs=sequence_output[:, 0, :])
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFSequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "vit", None) is not None:
+ with tf.name_scope(self.vit.name):
+ self.vit.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
+
+__all__ = ["TFViTForImageClassification", "TFViTModel", "TFViTPreTrainedModel"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py b/janus/lib/python3.10/site-packages/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..b51a9340dbfea0013235b72b17eb93d95385245e
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py
@@ -0,0 +1,198 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+"""Tokenization classes for XLM-RoBERTa model."""
+
+import os
+from shutil import copyfile
+from typing import List, Optional, Tuple
+
+from ...tokenization_utils import AddedToken
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import is_sentencepiece_available, logging
+
+
+if is_sentencepiece_available():
+ from .tokenization_xlm_roberta import XLMRobertaTokenizer
+else:
+ XLMRobertaTokenizer = None
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
+
+
+class XLMRobertaTokenizerFast(PreTrainedTokenizerFast):
+ """
+ Construct a "fast" XLM-RoBERTa tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from
+ [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
+ [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+ </Tip>
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+ </Tip>
+
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
+ Additional special tokens used by the tokenizer.
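+
+ A minimal usage sketch (the checkpoint name is only an example):
+
+ ```python
+ >>> from transformers import XLMRobertaTokenizerFast
+
+ >>> tokenizer = XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base")
+ >>> encoding = tokenizer("Hello world!", "How are you?")
+ >>> input_ids = encoding["input_ids"]  # encoded as `<s> A </s></s> B </s>`, so it starts with the <s>/CLS id
+ ```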
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+ slow_tokenizer_class = XLMRobertaTokenizer
+
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ bos_token="",
+ eos_token="",
+ sep_token="",
+ cls_token="",
+ unk_token="",
+ pad_token="",
+ mask_token="",
+ **kwargs,
+ ):
+ # Mask token behave like a normal word, i.e. include the space before it
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
+
+ super().__init__(
+ vocab_file,
+ tokenizer_file=tokenizer_file,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ **kwargs,
+ )
+
+ self.vocab_file = vocab_file
+
+ @property
+ def can_save_slow_tokenizer(self) -> bool:
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. An XLM-RoBERTa sequence has the following format:
+
+ - single sequence: `<s> X </s>`
+ - pair of sequences: `<s> A </s></s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
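+
+ Example (a sketch assuming an already-loaded `tokenizer`; the IDs are arbitrary):
+
+ ```python
+ >>> ids = tokenizer.build_inputs_with_special_tokens([100, 200], [300])
+ >>> # -> [cls_token_id, 100, 200, sep_token_id, sep_token_id, 300, sep_token_id]
+ ```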
+ """
+
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
+ not make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
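+
+ Example (a sketch assuming an already-loaded `tokenizer`; the IDs are arbitrary):
+
+ ```python
+ >>> tokenizer.create_token_type_ids_from_sequences([100, 200], [300])
+ [0, 0, 0, 0, 0, 0, 0]
+ ```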
+
+ """
+
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not self.can_save_slow_tokenizer:
+ raise ValueError(
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
+ "tokenizer."
+ )
+
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+
+ return (out_vocab_file,)
+
+
+__all__ = ["XLMRobertaTokenizerFast"]